Python爬取腾讯疫情实时数据并存储到mysql数据库
思路:
在腾讯疫情数据网站F12解析网站结构,使用Python爬取当日疫情数据和历史疫情数据,分别存储到details和history两个mysql表。
①此方法用于爬取每日详细疫情数据
import requests
import json
import time


def get_details():
    """Fetch today's per-city epidemic figures from the Tencent API.

    The endpoint returns JSONP; the wrapper is stripped and the nested
    JSON string under ``data`` is decoded a second time.

    Returns:
        list[list]: rows of
        [update_time, province, city, confirm, confirm_add, heal, dead].
    """
    # NOTE(review): the URL lost its scheme during extraction; reconstructed
    # as the known Tencent endpoint — confirm it is still reachable.
    url = ('https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
           '&callback=jQuery34102848205531413024_1584924641755&_=1584924641756')
    headers = {
        'user-agent': ('Mozilla/5.0 (Windows NT 10.0; WOW64) '
                       'AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/70.0.3538.25 Safari/537.36 '
                       'Core/1.70.3741.400 QQBrowser/10.5.3863.400'),
    }
    res = requests.get(url, headers=headers)
    # The body is JSONP: strip the "jQuery...(" prefix and the trailing ")".
    response_data = json.loads(
        res.text.replace('jQuery34102848205531413024_1584924641755(', '')[:-1])
    # response_data has keys ('ret', 'data'); ret==0 means success and
    # 'data' is itself a JSON string that must be decoded again.
    data = json.loads(response_data['data'])
    # areaTree[0] is the country node; its 'children' are provinces and
    # each province's 'children' are cities.  'total' holds cumulative
    # figures, 'today' holds the daily increment.
    area_tree = data['areaTree']
    ds = data['lastUpdateTime']  # timestamp of the feed's last update
    details = []
    for pro_infos in area_tree[0]['children']:
        province_name = pro_infos['name']  # province name
        for city_infos in pro_infos['children']:
            city_name = city_infos['name']                # city name
            confirm = city_infos['total']['confirm']      # cumulative confirmed
            confirm_add = city_infos['today']['confirm']  # new confirmed today
            heal = city_infos['total']['heal']            # cumulative healed
            dead = city_infos['total']['dead']            # cumulative deaths
            details.append([ds, province_name, city_name,
                            confirm, confirm_add, heal, dead])
    return details
单独测试方法:
# d = get_details()
# print(d)
②此方法用于爬取历史详细数据
import requests
import json
import time


def get_history():
    """Fetch nationwide historical daily totals and daily increments.

    Merges ``chinaDayList`` (cumulative history) with ``chinaDayAddList``
    (daily increments) keyed by ISO date.

    Returns:
        dict: ``{'YYYY-MM-DD': {'confirm', 'suspect', 'heal', 'dead',
        'confirm_add', 'suspect_add', 'heal_add', 'dead_add'}}``
    """
    url = ('https://view.inews.qq.com/g2/getOnsInfo?name=disease_other'
           '&callback=jQuery341026745307075030955_1584946267054&_=1584946267055')
    headers = {
        'user-agent': ('Mozilla/5.0 (Windows NT 10.0; WOW64) '
                       'AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/70.0.3538.25 Safari/537.36 '
                       'Core/1.70.3741.400 QQBrowser/10.5.3863.400'),
    }
    res = requests.get(url, headers=headers)
    # Strip the JSONP wrapper, then decode the nested JSON string in 'data'.
    response_data = json.loads(
        res.text.replace('jQuery341026745307075030955_1584946267054(', '')[:-1])
    data = json.loads(response_data['data'])
    china_day_list = data['chinaDayList']          # cumulative history
    china_day_add_list = data['chinaDayAddList']   # daily-increment history

    def _iso_date(raw):
        # API dates are 'MM.DD'; prepend the year and normalise to
        # 'YYYY-MM-DD' for the DATETIME column.
        # NOTE(review): the hard-coded '2021.' breaks for feeds spanning
        # other years — confirm the feed's date range.
        tup = time.strptime('2021.' + raw, '%Y.%m.%d')
        return time.strftime('%Y-%m-%d', tup)

    history = {}
    for day in china_day_list:
        history[_iso_date(day['date'])] = {
            'confirm': day['confirm'],
            'suspect': day['suspect'],
            'heal': day['heal'],
            'dead': day['dead'],
        }
    for day in china_day_add_list:
        # setdefault guards against an add-list date missing from the
        # day list (the original indexed directly and could KeyError).
        history.setdefault(_iso_date(day['date']), {}).update({
            'confirm_add': day['confirm'],
            'suspect_add': day['suspect'],
            'heal_add': day['heal'],
            'dead_add': day['dead'],
        })
    return history
单独测试此方法:
# h = get_history()
# print(h)
③此方法用于数据库的连接与关闭:
import time
import pymysql
import traceback


def get_conn():
    """Open a MySQL connection to the local ``mydb`` database.

    Returns:
        tuple: ``(connection, cursor)`` — the default cursor returns
        result rows as tuples.
    """
    # NOTE(review): credentials are hard-coded from the original article;
    # move them to configuration for real use.
    conn = pymysql.connect(host="127.0.0.1",
                           user="root",
                           password="000429",
                           db="mydb",
                           charset="utf8")
    cursor = conn.cursor()
    return conn, cursor


def close_conn(conn, cursor):
    """Close the cursor and then the connection, tolerating None for either."""
    if cursor:
        cursor.close()
    if conn:
        conn.close()
④此方法用于更新并插入每日详细数据到数据库表:
def update_details():
    """Insert today's per-city rows into the ``details`` table.

    Compares the feed's lastUpdateTime with the newest ``update_time``
    already stored; the whole batch is inserted only when they differ,
    so re-running within the same update window is a no-op.
    """
    cursor = None
    conn = None
    try:
        li = get_details()
        conn, cursor = get_conn()
        sql = ("insert into details(update_time,province,city,confirm,"
               "confirm_add,heal,dead) values(%s,%s,%s,%s,%s,%s,%s)")
        # Yields 1 when the feed timestamp equals the stored maximum,
        # else 0/NULL — i.e. "is the table already up to date?".
        sql_query = ('select %s=(select update_time from details '
                     'order by id desc limit 1)')
        cursor.execute(sql_query, li[0][0])
        if not cursor.fetchone()[0]:
            print(f"{time.asctime()}开始更新最新数据")
            for item in li:
                cursor.execute(sql, item)
            conn.commit()  # commit the whole batch of inserts at once
            print(f"{time.asctime()}更新最新数据完毕")
        else:
            print(f"{time.asctime()}已是最新数据!")
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt still propagates.
        traceback.print_exc()
    finally:
        close_conn(conn, cursor)
单独测试能否插入数据到details表:
update_details()
⑤此方法用于插入历史数据到history表
def insert_history():
    """Bulk-insert the full history dict into the ``history`` table.

    Intended for first-time population only: ``ds`` is the primary key,
    so running this twice raises duplicate-key errors (use
    ``update_history`` for incremental runs).
    """
    cursor = None
    conn = None
    try:
        dic = get_history()
        print(f"{time.asctime()}开始插入历史数据")
        conn, cursor = get_conn()
        # Column order: ds, confirm, confirm_add, suspect, suspect_add,
        # heal, heal_add, dead, dead_add — matches the table definition.
        sql = "insert into history values(%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        for k, v in dic.items():
            # v format: {'confirm': 41, 'suspect': 0, 'heal': 0, 'dead': 1, ...}
            cursor.execute(sql, [k, v.get("confirm"), v.get("confirm_add"),
                                 v.get("suspect"), v.get("suspect_add"),
                                 v.get("heal"), v.get("heal_add"),
                                 v.get("dead"), v.get("dead_add")])
        conn.commit()  # single commit for the whole batch
        print(f"{time.asctime()}插入历史数据完毕")
    except Exception:
        traceback.print_exc()
    finally:
        close_conn(conn, cursor)
单独测试能否插入数据到history表:
# insert_history()
⑥此方法用于根据时间来更新历史数据表的内容:
def update_history():
    """Insert any history dates not yet present in the ``history`` table.

    For each date in the fetched history, a lookup by primary key ``ds``
    decides whether the row is new; only missing dates are inserted.
    """
    cursor = None
    conn = None
    try:
        dic = get_history()
        print(f"{time.asctime()}开始更新历史数据")
        conn, cursor = get_conn()
        sql = "insert into history values(%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        sql_query = "select confirm from history where ds=%s"
        for k, v in dic.items():
            # v format: {'confirm': 41, 'suspect': 0, 'heal': 0, 'dead': 1, ...}
            # execute() returns the matched row count; insert only when the
            # date is absent.  NOTE(review): the extracted original lost the
            # negation — without `not`, every existing date would trigger a
            # duplicate-primary-key error since ds is the PK.
            if not cursor.execute(sql_query, k):
                cursor.execute(sql, [k, v.get("confirm"), v.get("confirm_add"),
                                     v.get("suspect"), v.get("suspect_add"),
                                     v.get("heal"), v.get("heal_add"),
                                     v.get("dead"), v.get("dead_add")])
        conn.commit()  # commit all new rows together
        print(f"{time.asctime()}历史数据更新完毕")
    except Exception:
        traceback.print_exc()
    finally:
        close_conn(conn, cursor)
单独测试更新历史数据表的方法:
# update_history()
最后是两个数据表的详细建立代码(也可以使用mysql可视化工具直接建立):
-- history: one row per date of nationwide cumulative/incremental figures.
create table history(
    ds datetime not null comment '日期',
    confirm int(11) default null comment '累计确诊',
    confirm_add int(11) default null comment '当日新增确诊',
    suspect int(11) default null comment '剩余疑似',
    suspect_add int(11) default null comment '当日新增疑似',
    heal int(11) default null comment '累计治愈',
    heal_add int(11) default null comment '当日新增治愈',
    dead int(11) default null comment '累计死亡',
    dead_add int(11) default null comment '当日新增死亡',
    primary key(ds) using btree
) engine=InnoDB default charset=utf8mb4;

-- details: one row per city per update; id auto-increments so the
-- newest update_time is the last row by id.
create table details(
    id int(11) not null auto_increment,
    update_time datetime default null comment '数据最后更新时间',
    province varchar(50) default null comment '省',
    city varchar(50) default null comment '市',
    confirm int(11) default null comment '累计确诊',
    confirm_add int(11) default null comment '新增确诊',
    heal int(11) default null comment '累计治愈',
    dead int(11) default null comment '累计死亡',
    primary key(id)
) engine=InnoDB default charset=utf8mb4;
Tomorrow the birds will sing.
版权声明:本站内容均来自互联网,仅供演示用,请勿用于商业和其他非法用途。如果侵犯了您的权益请与我们联系QQ:729038198,我们将在24小时内删除。
发表评论