Python: Scraping Daily Epidemic Data
Preface:
My last update was back in mid-March. The health commission keeps changing its data format, so this code should be treated as reference only!
Code
import requests
from bs4 import BeautifulSoup
import datetime
import re
from selenium import webdriver
import time
import xlwings as xw
def get_sh_data(url):
    '''Parse the figures out of a Shanghai Health Commission press release.'''
    r = requests.get(url=url, headers=sh_headers)
    sh_dict = {}
    soup = BeautifulSoup(r.text, 'lxml')
    # print(soup)
    ivs_content = soup.find(name='div', attrs={'id': 'ivs_content', 'class': 'Article_content'})
    new_text = ivs_content.get_text()
    # print(new_text)
    sh_dict['累计排除疑似'] = re.search('已累计排除疑似病例(\d+)例', new_text).group(1)
    sh_dict['累计确诊'] = re.search('发现确诊病例(\d+)例', new_text).group(1)
    style2 = '(\d+)例病情危重,(\d+)例重症,(\d+)例治愈出院,(\d+)例死亡'
    sh_dict['累计重症'] = int(re.search(style2, new_text).group(1)) + int(re.search(style2, new_text).group(2))
    sh_dict['累计治愈'] = re.search(style2, new_text).group(3)
    sh_dict['累计死亡'] = re.search(style2, new_text).group(4)
    sh_dict['累计疑似'] = re.search('尚有(\d+)例疑似病例正在排查中', new_text).group(1)
    return sh_dict
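The regular expressions above pull each figure out of the running text of the press release. Below is a minimal, self-contained sketch of the same idea on an invented sentence; the wording and numbers are made up for illustration and only mimic the format the patterns expect:

import re

# Invented sample text in the style the patterns above expect.
sample = '已累计排除疑似病例100例,发现确诊病例50例,其中3例病情危重,5例重症,20例治愈出院,2例死亡,尚有10例疑似病例正在排查中'
print(re.search(r'已累计排除疑似病例(\d+)例', sample).group(1))  # -> '100'
style2 = r'(\d+)例病情危重,(\d+)例重症,(\d+)例治愈出院,(\d+)例死亡'
m = re.search(style2, sample)
print(int(m.group(1)) + int(m.group(2)))  # critical + severe -> 8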
def get_sh_today_news():
    '''Find today's press release on the Shanghai Health Commission news list.'''
    url = r'v/xwfb/index.html'
    r = requests.get(url=url, headers=sh_headers)
    soup = BeautifulSoup(r.text, 'lxml')
    # print(soup)
    today_format = datetime.date.today().strftime('%Y-%m-%d')
    today_sh_news = soup.find_all(name='span', text=today_format)
    today_counts = len(today_sh_news)
    for i in range(today_counts - 1, -1, -1):
        title = today_sh_news[i].find_previous_sibling(name='a').attrs['title']  # headline
        href = 'v' + today_sh_news[i].find_previous_sibling(name='a').attrs['href']  # URL
        if title.startswith('上海新增'):
            # print(title)
            return get_sh_data(href)
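get_sh_today_news assumes that on the news list page each headline <a> is followed by a <span> holding the publication date, so it looks up today's date in a <span> and then steps back to the sibling <a> for the title and link. A self-contained sketch of that navigation on a hand-written snippet (the HTML below is invented and only mimics the assumed structure):

from bs4 import BeautifulSoup

html = '''<ul>
<li><a href="/xwfb/detail-1.html" title="上海新增确诊病例3例">上海新增确诊病例3例</a><span>2020-03-15</span></li>
</ul>'''
soup = BeautifulSoup(html, 'lxml')
span = soup.find(name='span', text='2020-03-15')  # locate today's date
link = span.find_previous_sibling(name='a')       # step back to the headline
print(link.attrs['title'], link.attrs['href'])    # -> 上海新增确诊病例3例 /xwfb/detail-1.html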
def get_all_today_news():
    '''Find today's bulletin on the National Health Commission news list.'''
    url = 'v/xcs/yqtb/list_gzbd.shtml'
    r = requests.get(url, headers=quanguo_headers)
    soup = BeautifulSoup(r.text, 'lxml')
    # print(soup)
    today_format = datetime.date.today().strftime('%Y-%m-%d')
    latest_news_title = soup.find(name='span', text=today_format).find_previous_sibling(name='a').attrs['title']
    latest_news_href = 'v' + soup.find(name='span', text=today_format).find_previous_sibling(name='a').attrs['href']
    # print(latest_news_href)
    return get_all_today_data(latest_news_href)
def get_all_today_data(url):
    '''Parse the national (and Hubei) figures out of an NHC bulletin.'''
    r = requests.get(url, headers=quanguo_headers)
    all_dict = {}
    hubei_dict = {}
    soup = BeautifulSoup(r.text, 'lxml')
    news = soup.find(name='p').get_text()
    # print(news)
    all_dict['新增疑似'] = re.search('新增疑似病例(\d+)例', news).group(1)
    all_dict['累计疑似'] = re.search('现有疑似病例(\d+)例', news).group(1)
    all_dict['累计确诊'] = re.search('累计报告确诊病例(\d+)例', news).group(1)
    all_dict['累计重症'] = re.search('其中重症病例(\d+)例', news).group(1)
    all_dict['累计死亡'] = re.search('累计死亡病例(\d+)例', news).group(1)
    all_dict['累计治愈'] = re.search('累计治愈出院病例(\d+)例', news).group(1)
    hubei_dict['新增疑似'] = re.search('新增疑似病例(\d+)例.*?（武汉(\d+)例', news).group(1)
    hubei_dict['新增确诊'] = re.search('湖北新增确诊病例(\d+)例.*?（武汉(\d+)例', news).group(1)
    hubei_dict['新增死亡'] = re.search('新增死亡病例(\d+)例.*?（武汉(\d+)例', news).group(1)
    hubei_dict['新增治愈'] = re.search('新增治愈出院病例(\d+)例（武汉(\d+)例）', news).group(1)
    hubei_dict['累计重症'] = re.search('其中重症病例(\d+)例.*?（武汉(\d+)例', news).group(1)
    # print(all_dict, hubei_dict)
    return all_dict, hubei_dict
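The Hubei patterns rely on the bulletin reporting provincial figures in the form '湖北新增确诊病例x例（武汉x例）' with full-width parentheses; group(1) captures the Hubei number, and group(2) would give the Wuhan one. A quick check against an invented sentence in that style:

import re

news = '湖北新增确诊病例325例（武汉216例），新增死亡病例18例（武汉12例）。'  # invented figures
m = re.search(r'湖北新增确诊病例(\d+)例.*?（武汉(\d+)例', news)
print(m.group(1), m.group(2))  # -> 325 216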
def get_cookie(url):
    '''Open the page in Chrome via Selenium and return its cookies as a header string.'''
    driver = webdriver.Chrome()
    driver.get(url)
    time.sleep(3)
    cookies = driver.get_cookies()
    driver.quit()
    items = []
    for i in range(len(cookies)):
        cookie_value = cookies[i]
        item = cookie_value['name'] + '=' + cookie_value['value']
        items.append(item)
    cookiestr = '; '.join(a for a in items)
    return cookiestr
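get_cookie stitches the Selenium cookies into a single 'name=value; name=value' string so it can be dropped straight into a request's Cookie header. An alternative, not what this script does, is to copy the cookies into a requests.Session and let it manage the header; a sketch of that approach:

import requests
from selenium import webdriver

def make_session(url):
    '''Visit the page once in Chrome, then reuse its cookies in a requests.Session.'''
    driver = webdriver.Chrome()
    driver.get(url)
    session = requests.Session()
    for cookie in driver.get_cookies():
        session.cookies.set(cookie['name'], cookie['value'])
    driver.quit()
    return session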
def get_into_excel():
    '''Paste the data into the Excel workbook.'''
    app = xw.App(visible=True, add_book=False)
    app.display_alerts = False
    app.screen_updating = False
    wb = app.books.open('新型冠状病毒每日数据.xlsx')
    ws = wb.sheets['all']
    max_row = ws.api.UsedRange.Rows.Count  # last used row in the sheet
    ws.range('C' + str(max_row)).value = hubei_data['新增确诊']
    ws.range('K' + str(max_row)).value = hubei_data['新增死亡']
    ws.range('O' + str(max_row)).value = hubei_data['新增治愈']
    ws.range('S' + str(max_row)).value = hubei_data['新增疑似']
    ws.range('AA' + str(max_row)).value = hubei_data['累计重症']
    ws.range('R' + str(max_row)).value = all_data['新增疑似']
    ws.range('AL' + str(max_row)).value = all_data['累计疑似']
    ws.range('V' + str(max_row)).value = all_data['累计确诊']
    ws.range('Z' + str(max_row)).value = all_data['累计重症']
    ws.range('AD' + str(max_row)).value = all_data['累计死亡']
    ws.range('AH' + str(max_row)).value = all_data['累计治愈']
    ws.range('AN' + str(max_row)).value = sh_data['累计排除疑似']
    ws.range('Y' + str(max_row)).value = sh_data['累计确诊']
    ws.range('AC' + str(max_row)).value = sh_data['累计重症']
    ws.range('AK' + str(max_row)).value = sh_data['累计治愈']
    ws.range('AG' + str(max_row)).value = sh_data['累计死亡']
    ws.range('AM' + str(max_row)).value = sh_data['累计疑似']
    wb.save()
    wb.close()
    app.quit()
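The column letters in get_into_excel are tied to the layout of the author's workbook, which is not shown here. If the repetition becomes a burden, one option is to collect the letter-to-value pairs in a dict and write them in a loop; a sketch of that refactor, with the column letters treated as an assumption about that layout:

def write_columns(ws, row, values_by_column):
    '''Write {'C': value1, 'K': value2, ...} into the given row of an xlwings sheet.'''
    for col, value in values_by_column.items():
        ws.range(col + str(row)).value = value

# e.g. write_columns(ws, max_row, {'C': hubei_data['新增确诊'], 'K': hubei_data['新增死亡']})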
if __name__ == "__main__":
    sh_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
        'Cookie': get_cookie('v/xwfb/index.html'),
# 'Cookie': 'zh_choose=s; zh_choose=s; _gscu_2010802395=80620430ie0po683; yd_cookie=12f170fc-e368-4a662db5220af2d434160e259b2e3158 5efb; _ydclearance=2cd0a8873fd311efcda1c1aa-05fc-4001-a108-0e86b80b3fee-1580700296; _gscbrs_2010802395=1; _pk_ref.30.0806=%5B%22%22% 2C%22%22%2C1580693101%2C%22https%3A%2F%2Fwww.baidu%2Flink%3Furl%3DDVUbOETLyMZLC5c_V7RJRbAYPvyqaU3f2PCBi2-E6KC2QE FltdrKWGmhgA5NbC3c%26wd%3D%26eqid%3Df38b30250015e1c5000000045e365a8d%22%5D; _pk_ses.30.0806=*; _pk_id.30.0806=35b481da38abb5 62.1580620431.6.1580694952.1580693101.; _gscs_2010802395=80693100qds57e17|pv:6; AlteonP=ALa1BGHbHKyWUqcNUGRETw$$',
        'Host': 'v'
    }
    quanguo_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
'Cookie':'oHAcoULcWCQb80S=pxzexGFCvyGV4xDkaMHSyjBmzwXn5O4vfCbxFCgMDfcBaKqsFU9FHstqjFY6wJt9; yfx_c_g_u_id_10006654=_ck200 20209283417867964364567575; _gscu_2059686908=81579037nbf5xc58; insert_cookie=67313298; yfx_f_l_v_t_10006654=f_t_1580606914774__r_t_15 81643181169__v_t_1581678949269__r_c_14; security_session_verify=a2efd6893c3ad08675db9b0f5c365ecf; oHAcoULcWCQb80T=4Ywh2qE8IiJP44Th dpW0fs7Yqi1Hwlh9RhJHrW2WVl536y4eCIgXxGh9M8IuYUqGUCCtBO5kBc2DB6Kewd3naLK_O2bK5W3w3pcqT.uX3asTXxC2SGBqy9eV2DoGB0ZXb4uTP zPGbXebmT6xIYxbAmGbm_kZVX_nUvBL4nkAuFAVvcGLBmXr8nsdEToXztqZUlYnTjn9niwHMcg3th7XhJvFS_tckqRq5bLpvS_IKPuYn2JLraIIejlErBhA5IQh yHXFekNynv5PYgpzu2PguGccrP3c_bcg1MFViQjKVhgs_B22Nv4NxdHdiIk9GdZDZBjQ',
        'Host': 'v'
    }
    # 1. National and Hubei data
    all_data, hubei_data, sh_data = {}, {}, {}
    try:
        all_data, hubei_data = get_all_today_news()
        print('全国数据:{}\n'
              '湖北数据:{}'.format(all_data, hubei_data))
    except:
        print('全国数据未更新')
    # 2. Shanghai data
    try:
        sh_data = get_sh_today_news()
        print('上海数据:{}'.format(sh_data))
    except:
        print('上海数据未更新')
    # 3. Export to Excel
    if sh_data != {} and all_data != {}:
        get_into_excel()
        print('Excel刷新成功!')