#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : 爬取YY评级基本信息.py
# @Author: lattesea
# @Date : 2019/10/7
# @Desc :
import requests
import json
import csv
from fake_useragent import UserAgent
import time
import random


class YYpingjiSpider(object):
    """Scrape issuer basic info from the YY rating API (api.ratingdog.cn).

    Issuers come in two types: 1001 ("产业", industrial) and 1002 ("城投",
    local-government financing vehicle). Each type has its own detail
    endpoint (``url2`` / ``url3``) and its own output CSV (1001.csv /
    1002.csv). NOTE(review): the site reportedly bans accounts that
    request too frequently, hence the random sleep in ``run``.
    """

    # Fields present at the top level of the detail payload for both types.
    _COMMON_FIELDS = ('IssuerName', 'CorporateRating', 'RatingAgency',
                      'Holder', 'Industry', 'Nature', 'YYRating',
                      'IssuerType', 'CreditAnalysis')
    # Extra fields nested under CyExtendInfo for type-1001 (产业) issuers.
    _CY_FIELDS = ('YYIndustry', 'YYIndustryId', 'IndustrylStatus',
                  'ShareholderBackground', 'OperatingStatus',
                  'FinancialStatus', 'Focus')
    # Extra fields nested under CtExtendInfo for type-1002 (城投) issuers.
    _CT_FIELDS = ('PlatformImportance', 'PrincipalBusiness', 'GDP',
                  'Revenue', 'YYRatio', 'IssuerCity', 'ADLevel')

    def __init__(self):
        # Paged search endpoint; `{}` is the page offset.
        # NOTE(review): the URL asks for limit=10 but run() steps the
        # offset by 20 — verify whether half the pages are being skipped.
        self.url = 'https://api.ratingdog.cn/v1/search?limit=10&offset={}&type=3&qtext=&filter=%7B%7D&_=1570391570681'
        # Detail endpoints for the two issuer types; `{}` is the IssuerID.
        self.url2 = 'https://api.ratingdog.cn/v1/GetIssuerInfo?IssuerID={}&IssuerType=1001'
        self.url3 = 'https://api.ratingdog.cn/v1/GetIssuerInfo?IssuerID={}&IssuerType=1002'

    def get_headers(self):
        """Return request headers with a fresh random User-Agent."""
        ua = UserAgent()
        return {
            "Accept": "application/json, text/plain, */*",
            "Origin": "https://www.ratingdog.cn",
            "Referer": "https://www.ratingdog.cn/",
            "Sec-Fetch-Mode": "cors",
            "User-Agent": ua.random,
        }

    def parse_IssuerID_IssuerType(self, url):
        """Fetch one search page; return a list of (IssuerID, IssuerType)."""
        html_py = json.loads(
            requests.get(url=url, headers=self.get_headers()).text)
        IssuerID_list = [(row['IssuerID'], row['IssuerType'])
                         for row in html_py['rows']]
        print(IssuerID_list)
        return IssuerID_list

    def _fetch_rows(self, url):
        """GET `url` and return the decoded 'rows' payload (a dict here,
        unlike the search endpoint where 'rows' is a list)."""
        html_json = requests.get(url=url, headers=self.get_headers()).text
        return json.loads(html_json)['rows']

    def _extract(self, rows, extend_key, extend_fields):
        """Flatten common + nested extend fields of `rows` into one dict.

        Returns {} when `rows` is empty, matching the original behavior
        (its per-key loop simply never ran on an empty payload).
        """
        basic_message = {}
        if rows:
            for key in self._COMMON_FIELDS:
                basic_message[key] = rows[key]
            for key in extend_fields:
                basic_message[key] = rows[extend_key][key]
        return basic_message

    def parse_basic_message_1002(self, IssuerID):
        """Fetch basic info for a 城投 (type 1002) issuer as a flat dict."""
        rows = self._fetch_rows(self.url3.format(IssuerID))
        basic_message = self._extract(rows, 'CtExtendInfo', self._CT_FIELDS)
        print(basic_message)
        return basic_message

    def parse_basic_message_1001(self, IssuerID):
        """Fetch basic info for a 产业 (type 1001) issuer as a flat dict."""
        rows = self._fetch_rows(self.url2.format(IssuerID))
        basic_message = self._extract(rows, 'CyExtendInfo', self._CY_FIELDS)
        print(basic_message)
        return basic_message

    def save_csv_1001(self, result):
        """Append one 产业 issuer record to 1001.csv."""
        fieldnames = list(self._COMMON_FIELDS) + list(self._CY_FIELDS)
        # utf-8 declared explicitly: field values contain Chinese text.
        with open('1001.csv', 'a', newline='', encoding='utf-8') as f:
            csv.DictWriter(f, fieldnames).writerow(result)

    def save_csv_1002(self, result):
        """Append one 城投 issuer record to 1002.csv.

        BUG FIX: the original field list repeated 'PrincipalBusiness',
        which made DictWriter emit that column twice per row.
        """
        fieldnames = list(self._COMMON_FIELDS) + list(self._CT_FIELDS)
        with open('1002.csv', 'a', newline='', encoding='utf-8') as f:
            csv.DictWriter(f, fieldnames).writerow(result)

    def run(self):
        """Walk every search page and persist each issuer's basic info."""
        for offset in range(0, 4631, 20):
            page_url = self.url.format(offset)
            for issuer_id, issuer_type in self.parse_IssuerID_IssuerType(page_url):
                if issuer_type == '产业':
                    self.save_csv_1001(self.parse_basic_message_1001(issuer_id))
                elif issuer_type == '城投':
                    self.save_csv_1002(self.parse_basic_message_1002(issuer_id))
                # Pace requests: too-frequent access gets the account banned.
                time.sleep(random.uniform(1, 4))


if __name__ == '__main__':
    spider = YYpingjiSpider()
    spider.run()

注意:该网站的主要限制是访问频率过高会被封账号,爬取时需要控制请求间隔。

爬取YY评级信息的更多相关文章

  1. 【图文详解】scrapy爬虫与动态页面——爬取拉勾网职位信息(2)

    上次挖了一个坑,今天终于填上了,还记得之前我们做的拉勾爬虫吗?那时我们实现了一页的爬取,今天让我们再接再厉,实现多页爬取,顺便实现职位和公司的关键词搜索功能. 之前的内容就不再介绍了,不熟悉的请一定要 ...

  2. 爬取拉勾网招聘信息并使用xlwt存入Excel

    xlwt 1.3.0 xlwt 文档 xlrd 1.1.0 python操作excel之xlrd 1.Python模块介绍 - xlwt ,什么是xlwt? Python语言中,写入Excel文件的扩 ...

  3. 爬虫系列2:Requests+Xpath 爬取租房网站信息

    Requests+Xpath 爬取租房网站信息 [抓取]:参考前文 爬虫系列1:https://www.cnblogs.com/yizhiamumu/p/9451093.html [分页]:参考前文 ...

  4. python itchat 爬取微信好友信息

    原文链接:https://mp.weixin.qq.com/s/4EXgR4GkriTnAzVxluJxmg 「itchat」一个开源的微信个人接口,今天我们就用itchat爬取微信好友信息,无图言虚 ...

  5. 使用request爬取拉钩网信息

    通过cookies信息爬取 分析header和cookies 通过subtext粘贴处理header和cookies信息 处理后,方便粘贴到代码中 爬取拉钩信息代码 import requests c ...

  6. Scrapy实战篇(七)之Scrapy配合Selenium爬取京东商城信息(下)

    之前我们使用了selenium加Firefox作为下载中间件来实现爬取京东的商品信息.但是在大规模的爬取的时候,Firefox消耗资源比较多,因此我们希望换一种资源消耗更小的方法来爬取相关的信息. 下 ...

  7. 简单的python爬虫--爬取Taobao淘女郎信息

    最近在学Python的爬虫,顺便就练习了一下爬取淘宝上的淘女郎信息:手法简单,由于淘宝网站本上做了很多的防爬措施,应此效果不太好! 爬虫的入口:https://mm.taobao.com/json/r ...

  8. selenium+phantomjs爬取京东商品信息

    selenium+phantomjs爬取京东商品信息 今天自己实战写了个爬取京东商品信息,和上一篇的思路一样,附上链接:https://www.cnblogs.com/cany/p/10897618. ...

  9. Python爬虫-爬取京东商品信息-按给定关键词

    目的:按给定关键词爬取京东商品信息,并保存至mongodb. 字段:title.url.store.store_url.item_id.price.comments_count.comments 工具 ...

随机推荐

  1. SRS之HLS部署实例源码分析

    1. 综述 SRS 关于 HLS 的具体配置可见: HLS部署实例 SRS 关于 hls 的配置文件内容如下: listen 1935; max_connections 1000; daemon of ...

  2. PHP 验证5-20位数字加字母的正则(数字和字母缺一不可)!!!

    $pattern = '/^(?![0-9]+$)(?![a-zA-Z]+$)[0-9A-Za-z]{5,20}$/'; if(!preg_match($pattern,$username)){ re ...

  3. PHP 封装POD 类

    使用POD的过程 //1.造DSN:驱动名:dbname=数据库名;host=服务器地址 $dsn = "mysql:dbname=mydb;host=localhost"; // ...

  4. linux调用本地shell脚本

    package com.haiyisoft.cAssistant.adapter.rest; import java.io.BufferedReader;import java.io.File;imp ...

  5. vscode+python+flake8+cmder配置

    {     "window.zoomLevel": 0,     "[python]": {},     "kite.showWelcomeNotif ...

  6. 理解MVC/MVP/MVVM的区别

    转载至[http://www.ruanyifeng.com/blog/2015/02/mvcmvp_mvvm.html] MVC 所有的通信都是单向的. M(Model)V(View)C(Contro ...

  7. electron关于页面跳转 的问题

    刚开始看到页面跳转,大家一般会想到用 window.location.href = './index.html'; 这样的代码.结果是可以跳转,但 DOM事件 基本都会失效.到最后还是使用的 elec ...

  8. Cortex-M3 操作模式与特权等级

    Cortex-M3支持2个模式(Handler模式.线程模式)和2个特权等级(特权级.非特权级). 当处理器处在线程模式时,既可以使用特权级,也可以使用非特权级. 当处理器处在Handler模式时,总 ...

  9. vue.js 三种方式安装

    Vue.js(读音 /vjuː/, 类似于 view)是一个构建数据驱动的 web 界面的渐进式框架.Vue.js 的目标是通过尽可能简单的 API 实现响应的数据绑定和组合的视图组件.它不仅易于上手 ...

  10. 五十二:WTForms表单验证之基本使用

    作用:1.做表单验证,把用户提交的数据验证是否合法2.做模板渲染 安装:pip install wtforms 表单验证1.自定义一个表单类,继承wtforms.Form2.定义好需要验证的字段,字段 ...