# -*- coding: utf-8 -*-
__author__ = 'Frank Li'
import requests
import json


class HotSpider(object):
    """Scrape Douban's hot domestic TV list page by page and append each
    item to a JSON-lines file (one JSON object per line)."""

    def __init__(self):
        # Paged mobile API endpoint; {} is filled with the `start` offset,
        # each page returns up to 18 items.
        self.url = "https://m.douban.com/rexxar/api/v2/subject_collection/filter_tv_domestic_hot/items?os=android&for_mobile=1&start={}&count=18&loc_id=108288"
        self.session = requests.session()
        # Mobile Referer + User-Agent so the API treats us as the m.douban.com app page.
        self.headers = {"Referer": "https://m.douban.com/tv/chinese",
                        "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Mobile Safari/537.36"}

    def parse_2_list_from_str(self, url):
        """Fetch one page of the API and return its list of subject items."""
        response = self.session.get(url, headers=self.headers)
        return json.loads(response.content.decode())['subject_collection_items']

    def save_as_file(self, content_list, file):
        """Append each item in `content_list` to `file` as one JSON line
        (UTF-8, non-ASCII characters kept readable)."""
        with open(file, 'a', encoding='utf-8') as f:
            for content in content_list:
                f.write(json.dumps(content, ensure_ascii=False))
                f.write('\n')

    def run(self):
        """Walk the pages 18 items at a time until roughly `total` items
        have been fetched, saving every page to hot.json."""
        num = 0
        total = 500
        url = self.url.format(num)
        while num < total + 18:
            print(url)
            self.save_as_file(self.parse_2_list_from_str(url), 'hot.json')
            num += 18
            url = self.url.format(num)


if __name__ == '__main__':
    hot_spider = HotSpider()
    hot_spider.run()

使用 xpath 爬取正在热映的电影,保存为 json 文件

# -*- coding: utf-8 -*-
__author__ = 'Frank Li'
import requests
from lxml import etree
import json

# Scrape the "now playing" movie list for Changsha from Douban and append
# each movie (name / poster / detail URL / score) to movie.json.
url = "https://movie.douban.com/cinema/nowplaying/changsha/"
headers = {"Referer": "https://movie.douban.com/cinema/nowplaying/changsha/",
           "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36"}

sess = requests.session()
response = sess.get(url, headers=headers)
html_str = response.content.decode()
element = etree.HTML(html_str)

# Four parallel lists extracted from the same <ul class='lists'>; they are
# zipped back together below, one entry per movie.
movie_img_list = element.xpath("//div[@class='mod-bd']/ul[@class='lists']//li[@class='poster']//img/@src")
movie_name_list = element.xpath("//div[@class='mod-bd']/ul[@class='lists']//li[@class='stitle']/a/@title")
movie_addr_list = element.xpath("//div[@class='mod-bd']/ul[@class='lists']//li[@class='stitle']/a/@href")
movie_score_list = element.xpath("//div[@class='mod-bd']/ul[@class='lists']//li[@class='srating']/span[@class='subject-rate']/text()")

# Open the output file once instead of re-opening it for every movie.
with open('movie.json', 'a', encoding='utf-8') as f:
    for name, img, addr, score in zip(movie_name_list, movie_img_list, movie_addr_list, movie_score_list):
        item = {'name': name, 'img': img, 'addr': addr, 'score': score}
        item_json = json.dumps(item, ensure_ascii=False, indent=2)
        print(item_json)
        f.write(item_json)
        f.write('\n')
        f.flush()

保存下来的 movie.json 文件

{
"name": "碟中谍6:全面瓦解",
"img": "https://img3.doubanio.com/view/photo/s_ratio_poster/public/p2529365085.jpg",
"addr": "https://movie.douban.com/subject/26336252/?from=playing_poster",
"score": "8.3"
}
{
"name": "阿尔法:狼伴归途",
"img": "https://img1.doubanio.com/view/photo/s_ratio_poster/public/p2530871439.jpg",
"addr": "https://movie.douban.com/subject/26810318/?from=playing_poster",
"score": "6.5"
}
{
"name": "蚁人2:黄蜂女现身",
"img": "https://img1.doubanio.com/view/photo/s_ratio_poster/public/p2529389608.jpg",
"addr": "https://movie.douban.com/subject/26636712/?from=playing_poster",
"score": "7.5"
}
{
"name": "传奇的诞生",
"img": "https://img1.doubanio.com/view/photo/s_ratio_poster/public/p2531286907.jpg",
"addr": "https://movie.douban.com/subject/3073268/?from=playing_poster",
"score": "7.6"
}
{
"name": "快把我哥带走",
"img": "https://img3.doubanio.com/view/photo/s_ratio_poster/public/p2531080870.jpg",
"addr": "https://movie.douban.com/subject/30122633/?from=playing_poster",
"score": "7.0"
}
{
"name": "道高一丈",
"img": "https://img1.doubanio.com/view/photo/s_ratio_poster/public/p2530863118.jpg",
"addr": "https://movie.douban.com/subject/26954268/?from=playing_poster",
"score": "5.7"
}
{
"name": "李宗伟:败者为王",
"img": "https://img3.doubanio.com/view/photo/s_ratio_poster/public/p2530870325.jpg",
"addr": "https://movie.douban.com/subject/27195119/?from=playing_poster",
"score": "7.1"
}
{
"name": "西虹市首富",
"img": "https://img1.doubanio.com/view/photo/s_ratio_poster/public/p2529206747.jpg",
"addr": "https://movie.douban.com/subject/27605698/?from=playing_poster",
"score": "6.7"
}
{
"name": "一出好戏",
"img": "https://img3.doubanio.com/view/photo/s_ratio_poster/public/p2529571873.jpg",
"addr": "https://movie.douban.com/subject/26985127/?from=playing_poster",
"score": "7.3"
}
{
"name": "精灵旅社3:疯狂假期",
"img": "https://img3.doubanio.com/view/photo/s_ratio_poster/public/p2530591543.jpg",
"addr": "https://movie.douban.com/subject/26630714/?from=playing_poster",
"score": "6.9"
}
{
"name": "苏丹",
"img": "https://img3.doubanio.com/view/photo/s_ratio_poster/public/p2529570494.jpg",
"addr": "https://movie.douban.com/subject/26728641/?from=playing_poster",
"score": "7.0"
}
{
"name": "巨齿鲨",
"img": "https://img3.doubanio.com/view/photo/s_ratio_poster/public/p2530572643.jpg",
"addr": "https://movie.douban.com/subject/26426194/?from=playing_poster",
"score": "6.0"
}
{
"name": "藏北秘岭-重返无人区",
"img": "https://img3.doubanio.com/view/photo/s_ratio_poster/public/p2532522676.jpg",
"addr": "https://movie.douban.com/subject/30208007/?from=playing_poster",
"score": "6.2"
}
{
"name": "那些女人",
"img": "https://img3.doubanio.com/view/photo/s_ratio_poster/public/p2530146643.jpg",
"addr": "https://movie.douban.com/subject/26574965/?from=playing_poster",
"score": "5.3"
}
{
"name": "草戒指",
"img": "https://img1.doubanio.com/view/photo/s_ratio_poster/public/p2531782507.jpg",
"addr": "https://movie.douban.com/subject/27204180/?from=playing_poster",
"score": "5.6"
}
{
"name": "吻隐者",
"img": "https://img3.doubanio.com/view/photo/s_ratio_poster/public/p2531980221.jpg",
"addr": "https://movie.douban.com/subject/26928809/?from=playing_poster",
"score": "7.6"
}
{
"name": "禹神传之寻找神力",
"img": "https://img3.doubanio.com/view/photo/s_ratio_poster/public/p2532781444.jpg",
"addr": "https://movie.douban.com/subject/30227727/?from=playing_poster",
"score": "6.6"
}
{
"name": "大师兄",
"img": "https://img1.doubanio.com/view/photo/s_ratio_poster/public/p2528842218.jpg",
"addr": "https://movie.douban.com/subject/27201353/?from=playing_poster",
"score": "6.2"
}

简单多线程图片下载

import requests
from bs4 import BeautifulSoup
import os
import threading


def download_img(src, target=None):
    """Stream the image at `src` into ./img/`target`.

    `target` defaults to the last path segment of the URL; the original
    code always overwrote the parameter, making it dead — now an explicit
    file name supplied by the caller is honoured.
    """
    parent_dir = './img'
    os.makedirs(parent_dir, exist_ok=True)
    # stream=True so large images are written chunk by chunk, not held in memory.
    r = requests.get(src, stream=True)
    if target is None:
        target = src.split('/')[-1]
    target = os.path.join(parent_dir, target)
    print(threading.current_thread(), ' start to download img: ', target)
    with open(target, 'wb') as tar_file:
        for chunk in r.iter_content(chunk_size=128):
            tar_file.write(chunk)
    print('saved {}'.format(target))


if __name__ == '__main__':
    URL = 'https://tieba.baidu.com/p/6034793219'
    html = requests.get(URL).text
    soup = BeautifulSoup(html, 'lxml')
    # Collect every in-post image URL, then download each in its own thread.
    imgs = [tag['src'] for tag in soup.find_all('img', {'class': 'BDE_Image'})]

    threads = []
    for i, img in enumerate(imgs):
        t = threading.Thread(target=download_img, args=(img,), name='Thread-{}'.format(i))
        t.start()
        threads.append(t)
    # Wait for all downloads before the main thread exits.
    for t in threads:
        t.join()

python--爬取豆瓣热门国产电视剧保存为文件的更多相关文章

  1. requests库爬取豆瓣热门国产电视剧数据并保存到本地

    首先要做的就是去豆瓣网找对应的接口,这里就不赘述了,谷歌浏览器抓包即可,然后要做的就是分析返回的json数据的结构: https://movie.douban.com/j/search_subject ...

  2. 利用Python爬取豆瓣电影

    目标:使用Python爬取豆瓣电影并保存MongoDB数据库中 我们先来看一下通过浏览器的方式来筛选某些特定的电影: 我们把URL来复制出来分析分析: https://movie.douban.com ...

  3. Python爬取豆瓣指定书籍的短评

    Python爬取豆瓣指定书籍的短评 #!/usr/bin/python # coding=utf-8 import re import sys import time import random im ...

  4. Python爬取豆瓣《复仇者联盟3》评论并生成乖萌的格鲁特

    代码地址如下:http://www.demodashi.com/demo/13257.html 1. 需求说明 本项目基于Python爬虫,爬取豆瓣电影上关于复仇者联盟3的所有影评,并保存至本地文件. ...

  5. Python爬取豆瓣电影top

    Python爬取豆瓣电影top250 下面以四种方法去解析数据,前面三种以插件库来解析,第四种以正则表达式去解析. xpath pyquery beaufifulsoup re 爬取信息:名称  评分 ...

  6. python爬取豆瓣首页热门栏目详细流程

    记录一下爬取豆瓣热门专栏的经过,通过这篇文章,你能学会requests,HTMLParser,json的基本使用,以及爬取网页内容的基本思路. 使用模块 1,获取豆瓣首页代码:首先我们需要访问豆瓣页面 ...

  7. python 爬取豆瓣的美剧

    pc版大概有500条记录,mobile大概是50部,只有热门的,所以少一点 url构造很简单,主要参数就是page_limit与page_start,每翻一页,start+=20即可,tag是&quo ...

  8. python爬取豆瓣电影信息数据

    题外话+ 大家好啊,最近自己在做一个属于自己的博客网站(准备辞职回家养老了,明年再战)在家里 琐事也很多, 加上自己 一回到家就懒了(主要是家里冷啊! 广东十几度,老家几度,躲在被窝瑟瑟发抖,) 由于 ...

  9. python 爬取豆瓣电影短评并wordcloud生成词云图

    最近学到数据可视化到了词云图,正好学到爬虫,各种爬网站 [实验名称] 爬取豆瓣电影<千与千寻>的评论并生成词云 1. 利用爬虫获得电影评论的文本数据 2. 处理文本数据生成词云图 第一步, ...

随机推荐

  1. Google Apps的单点登录-谷歌使用的单点登录

    简述: Customer :客户 Google:谷歌 Identity Provider:身份提供者安全断言标记语言(英语:Security Assertion Markup Language,简称S ...

  2. 【agc030f】Permutation and Minimum(动态规划)

    [agc030f]Permutation and Minimum(动态规划) 题面 atcoder 给定一个长度为\(2n\)的残缺的排列\(A\),定义\(b_i=min\{A_{2i-1},A_{ ...

  3. AHOI中国象棋(dp)

    大力dp题. 每行每列最多放两个,考虑用行作为dp阶段. dp[i][j][k]表示i行,有一个的有j列,有两个的有k列. 然后就是分类讨论. 一个都不放,放一个在0出,放一个在1出,放两个在0,放两 ...

  4. 2018蓝桥杯 省赛D题(测试次数)

    x星球的居民脾气不太好,但好在他们生气的时候唯一的异常举动是:摔手机.各大厂商也就纷纷推出各种耐摔型手机.x星球的质监局规定了手机必须经过耐摔测试,并且评定出一个耐摔指数来,之后才允许上市流通.x星球 ...

  5. Docker中如何删除image(镜像)

    原文地址:http://yaxin-cn.github.io/Docker/how-to-delete-a-docker-image.html docker中删除images的命令是docker rm ...

  6. linux提取第一列且删除第一行(awk函数)

    如下文件所示,只想提取红框中的内容,即进行提取第一列,且去除第一行的操作 则用到下列命令行: awk 'NR == 1 {next} {print $1}' file.txt > file_co ...

  7. tail 命令只查看日志中的关键字所在行信息

    tail -f info_log-2019-04-20.log |grep 要查询的关键字

  8. 第十一节、Harris角点检测原理(附源码)

    OpenCV可以检测图像的主要特征,然后提取这些特征.使其成为图像描述符,这类似于人的眼睛和大脑.这些图像特征可作为图像搜索的数据库.此外,人们可以利用这些关键点将图像拼接起来,组成一个更大的图像,比 ...

  9. QSS网址

    http://blog.csdn.net/liang19890820/article/details/51691212 https://www.cnblogs.com/cy568searchx/p/3 ...

  10. Android studio自带的sample例子

    1. 直接查看Github上面的Sample Sample in GitHub:  点击打开链接 2. FQ下载(个人倾向于下载) 需要FQ工具 goagent goagent在windows下的安装 ...