How to Crawl WeChat Official Account Articles and Comments with Python (Based on Fiddler Packet Capture Analysis)
Background
WeChat Official Accounts feel like one of the harder platforms to crawl, but after some tinkering there were still some modest gains. I did not use Scrapy (crawling too fast would probably hit anti-crawling limits anyway), but I will start writing up more hands-on cases later. A quick rundown of the development environment for this run: python3
requests
psycopg2 (for working with a PostgreSQL database)
Packet Capture Analysis
There is no restriction on which official account is crawled in this walkthrough, but every account has to be analyzed before each crawl. Open Fiddler and point the phone's proxy at it. To cut down on noise, add a filter rule in Fiddler so that only the domain mp.weixin.qq.com gets through:
Fiddler filter rule configuration
I follow quite a few accounts myself; this walkthrough uses "36Kr" (36氪) as the example. Read on:
The "36Kr" official account
Top-right corner -> All Messages
On the home page there are three solid dots in the top-right corner. Tap them to enter the messages screen, scroll down and tap "All Messages", then pull down a few times to load some historical articles. Go back to Fiddler and, barring surprises, you should see those requests. The returned data is JSON, and the article data itself is embedded as a JSON string in the general_msg_list field:
Captured request for the article list
Analyzing the Article List API
Paste the request URL and Cookie here for analysis:
mp.weixin.qq.com/mp/profile_ext?action=getmsg&__biz=MzI2NDk5NzA0Mw==&f=json&offset=10&count=10&is_ok=1&scene=126&uin=777&key=777&pass_ticket=QhOypNwH5dAr5w6UgMjyBrTSOdMEUT86vWc73GANoziWFl8xJd1hIMbMZ82KgCpN&
Cookie: pgv_pvid=2027337976; pgv_info=ssid=s3015512850; rewardsn=; wxtokenkey=777; wxuin=2089823341; devicetype=android-26; version=26070237; lang=zh_CN; pass_ticket=NDndxxaZ7p6Z9PYulWpLqMbI0i3ULFeCPIHBFu1sf5pX2IhkGfyxZ6b9JieSYR
The important parameters are explained below; anything not mentioned is simply less important:
__biz: effectively the id of the current official account (a unique, fixed marker)
offset: the offset used by the article-list API (starting from 0); each JSON response carries the offset to use for the next request; note that it does not simply grow by some fixed rule (see the pagination sketch after this list)
count: how many items are returned per request (10 is the maximum, from my tests)
pass_ticket: roughly a request ticket; it expires after a while (a few hours or so), which is why it is hard to crawl on a fixed schedule
appmsg_token: likewise a non-fixed ticket with an expiry policy
Cookie: you can paste the whole captured string, but at minimum only the wap_sid2 part is required
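As a minimal sketch of how this pagination behaves, the loop below keeps feeding next_offset back into the next request until can_msg_continue says there is no more history. BIZ, PASS_TICKET, APPMSG_TOKEN and COOKIE are placeholders that would come from a fresh Fiddler capture; the parameter set mirrors the request observed above:

import requests

# Placeholder credentials - replace with values captured in Fiddler; they expire after a few hours.
BIZ, PASS_TICKET, APPMSG_TOKEN, COOKIE = 'BIZ', 'PASS_TICKET', 'APPMSG_TOKEN', 'COOKIE'

api = 'https://mp.weixin.qq.com/mp/profile_ext'
headers = {'Cookie': COOKIE, 'User-Agent': 'Mozilla/5.0'}
offset = 0
while True:
    params = {'action': 'getmsg', '__biz': BIZ, 'f': 'json', 'offset': offset, 'count': 10,
              'is_ok': 1, 'uin': 777, 'key': 777, 'pass_ticket': PASS_TICKET,
              'appmsg_token': APPMSG_TOKEN, 'x5': 1}
    resp = requests.get(api, params=params, headers=headers).json()
    if resp.get('ret') != 0:
        break  # ticket expired or request rejected
    print('fetched a page of messages at offset', offset)
    if not resp.get('can_msg_continue'):
        break  # reached the end of the history
    offset = resp['next_offset']  # the server dictates the next offset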
This may feel a bit tedious, but since the goal is not a large-scale, professional crawler, analyzing a single account this way is still perfectly workable. Below is a captured fragment of the JSON data, which is used to design the article table:
{
"ret": 0,
"errmsg": "ok",
"msg_count": 10,
"can_msg_continue": 1,
"general_msg_list": "{\"list\":[{\"comm_msg_info\":{\"id\":1000005700,\"type\":49,\"datetime\":1535100
943,\"fakeid\":\"3264997043\",\"status\":2,\"content\":\"\"},\"app_msg_ext_info\":{\"title\":\"⾦融危机⼜⼗年:钱荒之下,⼆⼿基⾦迎来⾼光时刻\",\"digest\":\"退出"next_offset": 20,
"video_count": 1,
"use_video_tab": 1,
"real_type": 0
}delete的用法及短语>maven里repository文件在哪
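Note that general_msg_list is itself a JSON-encoded string inside the JSON response, so it has to be decoded a second time before the nested fields become usable. A small sketch of the unpacking (resp stands for the already-parsed response dict shown above):

import json

def unpack_articles(resp):
    """Yield (id, title) pairs from one profile_ext response; resp is the parsed dict shown above."""
    inner = json.loads(resp['general_msg_list'])      # second decode: JSON string -> dict
    for msg in inner['list']:
        comm = msg['comm_msg_info']                   # fields shared by the whole push
        first = msg.get('app_msg_ext_info')           # first article of the push
        if first:
            yield comm['id'], first['title']
            for item in first.get('multi_app_msg_item_list', []):
                yield item['fileid'], item['title']   # remaining articles of the push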
From this we can pull out the fields we want. The article table structure is defined as follows, along with the SQL to create it:
Article data table
-- ----------------------------
-- Table structure for tb_article
-- ----------------------------
DROP TABLE IF EXISTS "public"."tb_article";
CREATE TABLE "public"."tb_article" (
"id" serial4 PRIMARY KEY,
"msg_id" int8 NOT NULL,
"title" varchar(200) COLLATE "pg_catalog"."default" NOT NULL,
"author" varchar(20) COLLATE "pg_catalog"."default",
"cover" varchar(500) COLLATE "pg_catalog"."default",
"digest" varchar(200) COLLATE "pg_catalog"."default",
"source_url" varchar(800) COLLATE "pg_catalog"."default",
"content_url" varchar(600) COLLATE "pg_catalog"."default" NOT NULL,
"post_time" timestamp(6),
"create_time" timestamp(6) NOT NULL
)
;
COMMENT ON COLUMN "public"."tb_article"."id" IS '⾃增主键';
COMMENT ON COLUMN "public"."tb_article"."msg_id" IS '消息id (唯⼀)';
COMMENT ON COLUMN "public"."tb_article"."title" IS '标题';
COMMENT ON COLUMN "public"."tb_article"."author" IS '作者';
COMMENT ON COLUMN "public"."tb_article"."cover" IS '封⾯图';
COMMENT ON COLUMN "public"."tb_article"."digest" IS '关键字';
COMMENT ON COLUMN "public"."tb_article"."source_url" IS '原⽂地址';
COMMENT ON COLUMN "public"."tb_article"."content_url" IS '⽂章地址';
COMMENT ON COLUMN "public"."tb_article"."post_time" IS '发布时间';
COMMENT ON COLUMN "public"."tb_article"."create_time" IS '⼊库时间';
COMMENT ON TABLE "public"."tb_article" IS '⽂章表';
-- ----------------------------
-- Indexes structure for table tb_article
-- ----------------------------
CREATE UNIQUE INDEX "unique_msg_id" ON "public"."tb_article" USING btree (
"msg_id" "pg_catalog"."int8_ops" ASC NULLS LAST
);
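For reference, a minimal psycopg2 insert against this table could look like the sketch below; the connection settings and sample values are illustrative only, and the returning id clause (used later in the full version of the crawler) is what lets comment rows be linked back to the article row:

import psycopg2
from datetime import datetime

# Illustrative connection settings - adjust to your own database.
conn = psycopg2.connect(host='localhost', port=5432, dbname='wxmps', user='wxmps', password='wxmps')
with conn, conn.cursor() as cur:
    cur.execute(
        'insert into tb_article(msg_id,title,author,cover,digest,source_url,content_url,post_time,create_time) '
        'values (%s,%s,%s,%s,%s,%s,%s,%s,%s) returning id',
        (1000005700, 'sample title', 'sample author', None, None, None,
         'https://mp.weixin.qq.com/s/sample', datetime.now(), datetime.now()))
    article_id = cur.fetchone()[0]  # serial primary key assigned by Postgres
    print('inserted article row', article_id)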
Here is the code that requests the article API, parses the data and saves it to the database:
import json
import time
from datetime import datetime

import requests

from utils import pgs


class WxMps(object):
    """Crawler for official account articles and comments"""

    def __init__(self, _biz, _pass_ticket, _app_msg_token, _cookie, _offset=0):
        self.offset = _offset
        self.biz = _biz  # official account marker (__biz)
        self.msg_token = _app_msg_token  # ticket (not fixed)
        self.pass_ticket = _pass_ticket  # ticket (not fixed)
        self.headers = {
            'Cookie': _cookie,  # Cookie (not fixed)
            'User-Agent': 'Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 '
        }
        wx_mps = 'wxmps'  # database name, user and password are all the same here (replace with your own)
        self.postgres = pgs.Pgs(host='localhost', port='5432', db_name=wx_mps, user=wx_mps, password=wx_mps)

    def start(self):
        """Request the official account's article list API"""
        offset = self.offset
        while True:
            api = 'https://mp.weixin.qq.com/mp/profile_ext?action=getmsg&__biz={0}&f=json&offset={1}' \
                  '&count=10&is_ok=1&scene=124&uin=777&key=777&pass_ticket={2}&wxtoken=&appmsg_token' \
                  '={3}&x5=1&f=json'.format(self.biz, offset, self.pass_ticket, self.msg_token)
            resp = requests.get(api, headers=self.headers).json()
            ret, status = resp.get('ret'), resp.get('errmsg')  # status info
            if ret == 0 or status == 'ok':
                print('Crawl article: ' + api)
                offset = resp['next_offset']  # offset for the next request
                general_msg_list = resp['general_msg_list']
                msg_list = json.loads(general_msg_list)['list']  # article list
                for msg in msg_list:
                    comm_msg_info = msg['comm_msg_info']  # data shared by all articles in this push
                    msg_id = comm_msg_info['id']  # article id
                    post_time = datetime.fromtimestamp(comm_msg_info['datetime'])  # publish time
                    # msg_type = comm_msg_info['type']  # article type
                    # msg_data = json.dumps(comm_msg_info, ensure_ascii=False)  # raw msg data
                    app_msg_ext_info = msg.get('app_msg_ext_info')  # raw article data
                    if app_msg_ext_info:
                        # first article of this push
                        self._parse_articles(app_msg_ext_info, msg_id, post_time)
                        # remaining articles of this push
                        multi_app_msg_item_list = app_msg_ext_info.get('multi_app_msg_item_list')
                        if multi_app_msg_item_list:
                            for item in multi_app_msg_item_list:
                                msg_id = item['fileid']  # article id
                                if msg_id == 0:
                                    msg_id = int(time.time() * 1000)  # use a unique id when id=0 to avoid unique-index conflicts
                                self._parse_articles(item, msg_id, post_time)
                print('next offset is %d' % offset)
            else:
                print('Before break , Current offset is %d' % offset)
                break

    def _parse_articles(self, info, msg_id, post_time):
        """Parse the nested article data and save it to the database"""
        title = info.get('title')  # title
        cover = info.get('cover')  # cover image
        author = info.get('author')  # author
        digest = info.get('digest')  # digest/keywords
        source_url = info.get('source_url')  # original source url
        content_url = info.get('content_url')  # article url
        # ext_data = json.dumps(info, ensure_ascii=False)  # raw data
        self.postgres.handler(self._save_article(), (msg_id, title, author, cover, digest,
                                                      source_url, content_url, post_time,
                                                      datetime.now()))  # create_time

    @staticmethod
    def _save_article():
        sql = 'insert into tb_article(msg_id,title,author,cover,digest,source_url,content_url,post_time,create_time) ' \
              'values(%s,%s,%s,%s,%s,%s,%s,%s,%s)'
        return sql


if __name__ == '__main__':
    biz = 'MzI2NDk5NzA0Mw=='  # "36Kr"
    pass_ticket = 'NDndxxaZ7p6Z9PYulWpLqMbI0i3ULFeCPIHBFu1sf5pX2IhkGfyxZ6b9JieSYRUy'
    app_msg_token = '971_Z0lVNQBcGsWColSubRO9H13ZjrPhjuljyxLtiQ~~'
    # the cookie below is truncated; these values differ per account and must be
    # refreshed with the packet capture tool before each crawl
    cookie = 'wap_sid2=CO3YwOQHEogBQnN4VTNhNmxQWmc3UHI2U3kteWhUeVExZHFVMnN0QXlsbzVJRUJKc1pkdVFUU2Y5UzhSVEtOZmt1VVlYTkR4SEllQ2huejlTTThJWndMQzZfYUw2SldLVGVMQUthUjc3QWdVMUdoaGN0Nml2SU05cXR1dTN2RkhRUVd'
    wxMps = WxMps(biz, pass_ticket, app_msg_token, cookie)
    wxMps.start()  # start crawling articles
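The utils.pgs module used above is not shown in the article. Assuming it is a thin psycopg2 wrapper, its handler() method presumably executes a statement with parameters and, when the SQL ends with returning id, hands back the generated key. A rough sketch of what such a helper might look like (a guess at its interface, not the author's actual implementation):

import psycopg2


class Pgs(object):
    """Minimal guess at the utils.pgs.Pgs helper used by the crawler."""

    def __init__(self, host, port, db_name, user, password):
        self.conn = psycopg2.connect(host=host, port=port, dbname=db_name,
                                     user=user, password=password)

    def handler(self, sql, params):
        """Execute an insert; return the generated id if the statement uses 'returning id'."""
        with self.conn.cursor() as cur:
            try:
                cur.execute(sql, params)
                row = cur.fetchone() if 'returning' in sql else None
                self.conn.commit()
                return row[0] if row else None
            except Exception as e:
                self.conn.rollback()  # e.g. unique-index conflict on msg_id/content_id
                print('db error:', e)
                return None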
Analyzing the Article Comment API
The idea for fetching comments is roughly the same, just a bit more troublesome. First, open an article that has comments on the phone, then look at the request captured by Fiddler:
Article comments
Captured request for the article comment API
Extract the URL and Cookie from it and analyze again:
mp.weixin.qq.com/mp/appmsg_comment?action=getcomment&scene=0&__biz=MzI2NDk5NzA0Mw==&appmsgid=2247518723&idx=1&comment_id=433253969406607362&offset=0&limit=100&uin=777&key=777&pass_ticket=NDndxxaZ7p6Z9PYulWpL
Cookie: pgv_pvid=2027337976; pgv_info=ssid=s3015512850; rewardsn=; wxuin=2089823341; devicetype=android-26; version=26070237; lang=zh_CN; pass_ticket=NDndxxaZ7p6Z9PYulWpLqMbI0i3ULFeCPIHBFu1sf5pX2IhkGfyxZ6b9JieSYRUy; wap_sid2=CO
Now the parameters:
__biz: same as above
pass_ticket: same as above
Cookie: same as above
offset and limit: the offset and the number of comments per request; since at most 100 comments are shown, these two can be left as they are
comment_id: the id used to fetch a given article's comments; it is fixed, but has to be parsed out of the article page's HTML
appmsgid: a ticket id; not fixed, it has to be parsed out of the article page's HTML each time
appmsg_token: a ticket token; not fixed, it has to be parsed out of the article page's HTML each time
So the last three parameters have to be extracted from the HTML (it took me quite a while back then before I thought of looking at the article page's structure). The article-list API already provides the article address, the content_url field above, but the URL still needs some cleanup before it is requested; otherwise the three parameters above will be missing from the page and the comment data cannot be fetched afterwards:
def _parse_article_detail(self, content_url, article_id):
    """Extract from the article page the parameters needed to fetch comments; article_id is the id of the already-saved article"""
    try:
        api = content_url.replace('amp;', '').replace('#wechat_redirect', '').replace('http', 'https')
        html = requests.get(api, headers=self.headers).text
    except:
        print('Failed to fetch comments: ' + content_url)
    else:
        # group(0) is the whole matched line
        str_comment = re.search(r'var comment_id = "(.*)" \|\| "(.*)" \* 1;', html)
        str_msg = re.search(r"var appmsgid = '' \|\| '(.*)'\|\|", html)
        str_token = re.search(r'window.appmsg_token = "(.*)";', html)

        if str_comment and str_msg and str_token:
            comment_id = str_comment.group(1)  # comment id (fixed)
            app_msg_id = str_msg.group(1)  # ticket id (not fixed)
            appmsg_token = str_token.group(1)  # ticket token (not fixed)
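To make those regular expressions concrete, here is roughly what they match. The HTML fragment below is a hand-written illustration of the kind of inline script found on the article page, using the ids seen in the captured URL above; it is not a verbatim copy of the real page:

import re

# Hand-written stand-in for the inline <script> on an article page (values are illustrative).
html = '''
var comment_id = "433253969406607362" || "0" * 1;
var appmsgid = '' || '2247518723'|| "";
window.appmsg_token = "971_sample_token";
'''

str_comment = re.search(r'var comment_id = "(.*)" \|\| "(.*)" \* 1;', html)
str_msg = re.search(r"var appmsgid = '' \|\| '(.*)'\|\|", html)
str_token = re.search(r'window.appmsg_token = "(.*)";', html)

print(str_comment.group(1))  # 433253969406607362
print(str_msg.group(1))      # 2247518723
print(str_token.group(1))    # 971_sample_token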
Now look at the JSON this API returns, analyze its structure and define the data table (SQL included):
Article comment data table
-- ----------------------------
-- Table structure for tb_article_comment
-- ----------------------------
DROP TABLE IF EXISTS "public"."tb_article_comment";
CREATE TABLE "public"."tb_article_comment" (
"id" serial4 PRIMARY KEY,
"article_id" int4 NOT NULL,
"comment_id" varchar(50) COLLATE "pg_catalog"."default",
"nick_name" varchar(50) COLLATE "pg_catalog"."default" NOT NULL,
"logo_url" varchar(300) COLLATE "pg_catalog"."default",
"content_id" varchar(50) COLLATE "pg_catalog"."default" NOT NULL,
"content" varchar(3000) COLLATE "pg_catalog"."default" NOT NULL,
"like_num" int2,
"comment_time" timestamp(6),
"create_time" timestamp(6) NOT NULL
)
;
COMMENT ON COLUMN "public"."tb_article_comment"."id" IS '⾃增主键';
COMMENT ON COLUMN "public"."tb_article_comment"."article_id" IS '⽂章外键id';
COMMENT ON COLUMN "public"."tb_article_comment"."comment_id" IS '评论接⼝id';
COMMENT ON COLUMN "public"."tb_article_comment"."nick_name" IS '⽤户昵称';
COMMENT ON COLUMN "public"."tb_article_comment"."logo_url" IS '头像地址';
COMMENT ON COLUMN "public"."tb_article_comment"."content_id" IS '评论id (唯⼀)';
COMMENT ON COLUMN "public"."tb_article_comment"."content" IS '评论内容';
COMMENT ON COLUMN "public"."tb_article_comment"."like_num" IS '点赞数';
COMMENT ON COLUMN "public"."tb_article_comment"."comment_time" IS '评论时间';
COMMENT ON COLUMN "public"."tb_article_comment"."create_time" IS '⼊库时间';
COMMENT ON TABLE "public"."tb_article_comment" IS '⽂章评论表';
-- ----------------------------
-- Indexes structure for table tb_article_comment
-- ----------------------------
CREATE UNIQUE INDEX "unique_content_id" ON "public"."tb_article_comment" USING btree (
"content_id" COLLATE "pg_catalog"."default" "pg_catalog"."text_ops" ASC NULLS LAST
);
The long march is nearly over. Finally, here is this part of the code; since the article URL has to be obtained first, it is presented together with the article-fetching code from above:

import json
import re
import time
from datetime import datetime
import requests
from utils import pgs
class WxMps(object):
    """Crawler for official account articles and comments"""

    def __init__(self, _biz, _pass_ticket, _app_msg_token, _cookie, _offset=0):
        self.offset = _offset
        self.biz = _biz  # official account marker (__biz)
        self.msg_token = _app_msg_token  # ticket (not fixed)
        self.pass_ticket = _pass_ticket  # ticket (not fixed)
        self.headers = {
            'Cookie': _cookie,  # Cookie (not fixed)
            'User-Agent': 'Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 '
        }
        wx_mps = 'wxmps'  # database name, user and password are all the same here (replace with your own)
        self.postgres = pgs.Pgs(host='localhost', port='5432', db_name=wx_mps, user=wx_mps, password=wx_mps)

    def start(self):
        """Request the official account's article list API"""
        offset = self.offset
        while True:
            api = 'https://mp.weixin.qq.com/mp/profile_ext?action=getmsg&__biz={0}&f=json&offset={1}' \
                  '&count=10&is_ok=1&scene=124&uin=777&key=777&pass_ticket={2}&wxtoken=&appmsg_token' \
                  '={3}&x5=1&f=json'.format(self.biz, offset, self.pass_ticket, self.msg_token)
            resp = requests.get(api, headers=self.headers).json()
            ret, status = resp.get('ret'), resp.get('errmsg')  # status info
            if ret == 0 or status == 'ok':
                print('Crawl article: ' + api)
                offset = resp['next_offset']  # offset for the next request
                general_msg_list = resp['general_msg_list']
                msg_list = json.loads(general_msg_list)['list']  # article list
                for msg in msg_list:
                    comm_msg_info = msg['comm_msg_info']  # data shared by all articles in this push
                    msg_id = comm_msg_info['id']  # article id
                    post_time = datetime.fromtimestamp(comm_msg_info['datetime'])  # publish time
                    # msg_type = comm_msg_info['type']  # article type
                    # msg_data = json.dumps(comm_msg_info, ensure_ascii=False)  # raw msg data
                    app_msg_ext_info = msg.get('app_msg_ext_info')  # raw article data
                    if app_msg_ext_info:
                        # first article of this push
                        self._parse_articles(app_msg_ext_info, msg_id, post_time)
                        # remaining articles of this push
                        multi_app_msg_item_list = app_msg_ext_info.get('multi_app_msg_item_list')
                        if multi_app_msg_item_list:
                            for item in multi_app_msg_item_list:
                                msg_id = item['fileid']  # article id
                                if msg_id == 0:
                                    msg_id = int(time.time() * 1000)  # use a unique id when id=0 to avoid unique-index conflicts
                                self._parse_articles(item, msg_id, post_time)
                print('next offset is %d' % offset)
            else:
                print('Before break , Current offset is %d' % offset)
                break

    def _parse_articles(self, info, msg_id, post_time):
        """Parse the nested article data and save it to the database"""
        title = info.get('title')  # title
        cover = info.get('cover')  # cover image
        author = info.get('author')  # author
        digest = info.get('digest')  # digest/keywords
        source_url = info.get('source_url')  # original source url
        content_url = info.get('content_url')  # article url
        # ext_data = json.dumps(info, ensure_ascii=False)  # raw data
        content_url = content_url.replace('amp;', '').replace('#wechat_redirect', '').replace('http', 'https')
        article_id = self.postgres.handler(self._save_article(), (msg_id, title, author, cover, digest,
                                                                   source_url, content_url, post_time,
                                                                   datetime.now()))  # create_time
        if article_id:
            self._parse_article_detail(content_url, article_id)

    def _parse_article_detail(self, content_url, article_id):
        """Extract from the article page the parameters needed to fetch comments; article_id is the id of the saved article"""
        try:
            html = requests.get(content_url, headers=self.headers).text
        except:
            print('Failed to fetch comments: ' + content_url)
        else:
            # group(0) is the whole matched line
            str_comment = re.search(r'var comment_id = "(.*)" \|\| "(.*)" \* 1;', html)
            str_msg = re.search(r"var appmsgid = '' \|\| '(.*)'\|\|", html)
            str_token = re.search(r'window.appmsg_token = "(.*)";', html)

            if str_comment and str_msg and str_token:
                comment_id = str_comment.group(1)  # comment id (fixed)
                app_msg_id = str_msg.group(1)  # ticket id (not fixed)
                appmsg_token = str_token.group(1)  # ticket token (not fixed)

                # all three are required
                if appmsg_token and app_msg_id and comment_id:
                    print('Crawl article comments: ' + content_url)
                    self._crawl_comments(app_msg_id, comment_id, appmsg_token, article_id)

    def _crawl_comments(self, app_msg_id, comment_id, appmsg_token, article_id):
        """Crawl an article's comments"""
        api = 'https://mp.weixin.qq.com/mp/appmsg_comment?action=getcomment&scene=0&__biz={0}' \
              '&appmsgid={1}&idx=1&comment_id={2}&offset=0&limit=100&uin=777&key=777' \
              '&pass_ticket={3}&wxtoken=777&devicetype=android-26&clientversion=26060739' \
              '&appmsg_token={4}&x5=1&f=json'.format(self.biz, app_msg_id, comment_id,
                                                     self.pass_ticket, appmsg_token)
        resp = requests.get(api, headers=self.headers).json()
        ret, status = resp['base_resp']['ret'], resp['base_resp']['errmsg']
        if ret == 0 or status == 'ok':
            elected_comment = resp['elected_comment']
            for comment in elected_comment:
                nick_name = comment.get('nick_name')  # nickname
                logo_url = comment.get('logo_url')  # avatar
                comment_time = datetime.fromtimestamp(comment.get('create_time'))  # comment time
                content = comment.get('content')  # comment content
                content_id = comment.get('content_id')  # comment id
                like_num = comment.get('like_num')  # number of likes
                # reply_list = comment.get('reply')['reply_list']  # reply data
                self.postgres.handler(self._save_article_comment(), (article_id, comment_id, nick_name, logo_url,
                                                                     content_id, content, like_num, comment_time,
                                                                     datetime.now()))  # create_time

    @staticmethod
    def _save_article():
        sql = 'insert into tb_article(msg_id,title,author,cover,digest,source_url,content_url,post_time,create_time) ' \
              'values(%s,%s,%s,%s,%s,%s,%s,%s,%s) returning id'
        return sql

    @staticmethod
    def _save_article_comment():
        sql = 'insert into tb_article_comment(article_id,comment_id,nick_name,logo_url,content_id,content,like_num,' \
              'comment_time,create_time) values(%s,%s,%s,%s,%s,%s,%s,%s,%s)'
        return sql


if __name__ == '__main__':
    biz = 'MzI2NDk5NzA0Mw=='  # "36Kr"
    pass_ticket = 'NDndxxaZ7p6Z9PYulWpLqMbI0i3ULFeCPIHBFu1sf5pX2IhkGfyxZ6b9JieSYRUy'
    app_msg_token = '971_Z0lVNQBcGsWColSubRO9H13ZjrPhjuljyxLtiQ~~'
    # the cookie below is truncated; these values differ per account and must be
    # refreshed with the packet capture tool before each crawl
    cookie = 'wap_sid2=CO3YwOQHEogBQnN4VTNhNmxQWmc3UHI2U3kteWhUeVExZHFVMnN0QXlsbzVJRUJKc1pkdVFUU2Y5UzhSVEtOZmt1VVlYTkR4SEllQ2huejlTTThJWndMQzZfYUw2SldLVGVMQUthUjc3QWdVMUdoaGN0Nml2SU05cXR1dTN2RkhRUVd'
    wxMps = WxMps(biz, pass_ticket, app_msg_token, cookie)
    wxMps.start()  # start crawling articles and comments
Closing Notes
Finally, here is a look at the data in the database. Single-threaded crawling is slow, and I have no real need for this data, so this was just a casual test run:
Part of the crawled data
Writing a crawler can be finicky work at times; if it feels like too much hassle, I recommend checking out this tool. Questions are welcome in the comments at the bottom.
Full code:
That is all for this article. I hope it helps with your study, and thanks for your support.