import re
import time
import copy
import pickle
import requests
import argparse
class weibo():
    '''Weibo comment spider.

    Logs in through the mobile passport endpoint and scrapes the comment
    flow ("hotflow") of a single Weibo post, page by page, persisting the
    raw JSON responses to a pickle file.

    NOTE(review): the URLs below point at what looks like an obfuscated
    proxy host rather than weibo.cn directly — kept verbatim, confirm
    against the deployment environment before changing.
    '''
    def __init__(self, **kwargs):
        # Headers used only for the login POST (mimic the mobile sign-in page).
        self.login_headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36',
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Connection': 'keep-alive',
            'Origin': 'https://passport.weibo.cn',
            'Referer': 'https://passport.weibo.cn/signin/login?entry=mweibo&r=https%3A%2F%2Fweibo.cn%2F&backTitle=%CE%A2%B2%A9&vt='
        }
        self.login_url = 'http://222.178.203.72:19005/whst/63/=oZrronqszvdhanzbm//sso/login'
        self.home_url = 'http://222.178.203.72:19005/whst/63/=vdhanzbnl//'
        # Minimal headers for ordinary GET requests.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36',
        }
        self.session = requests.Session()
        # Seconds to sleep between page fetches, to avoid hammering the server.
        self.time_interval = 1.5

    def getComments(self, url, url_type='pc', max_page='all', savename=None, is_print=True, **kwargs):
        '''Crawl the comment pages of one post and dump them to a pickle file.

        Args:
            url: link to the post ('pc' style, or 'phone' style ending in the mid).
            url_type: 'pc' or 'phone' — how to extract the post's mid from url.
            max_page: positive int page limit, or 'all' to follow max_id until exhausted.
            savename: output pickle filename; defaults to 'comments_<timestamp>.pkl'.
            is_print: echo each page's JSON to stdout.

        Returns:
            True on completion.

        Raises:
            ValueError: on an invalid max_page or url_type.
        '''
        # Validate max_page: must be 'all' or a positive int.
        if not isinstance(max_page, int):
            if max_page != 'all':
                raise ValueError('[max_page] error, weibo.getComments -> [max_page] should be <number(int) larger than 0> or <all>')
        elif max_page < 1:
            raise ValueError('[max_page] error, weibo.getComments -> [max_page] should be <number(int) larger than 0> or <all>')
        # Resolve the post's mid from the link.
        if url_type == 'phone':
            mid = url.split('/')[-1]
        elif url_type == 'pc':
            mid = self.__getMid(url)
        else:
            raise ValueError('[url_type] error, weibo.getComments -> [url_type] should be <pc> or <phone>')
        # Build AJAX-style headers so the endpoint returns JSON.
        headers = copy.deepcopy(self.headers)
        headers['Accept'] = 'application/json, text/plain, */*'
        headers['MWeibo-Pwa'] = '1'
        headers['Referer'] = 'https://m.weibo.cn/detail/%s' % mid
        headers['X-Requested-With'] = 'XMLHttpRequest'
        url = 'http://222.178.203.72:19005/whst/63/=lzvdhanzbm//comments/hotflow?id={}&mid={}&max_id_type=0'.format(mid, mid)
        num_page = 0
        comments_data = {}
        while True:
            num_page += 1
            print('[INFO]: Start to get the comment data of page%d...' % num_page)
            if num_page > 1:
                # Subsequent pages are cursor-paginated via max_id / max_id_type
                # taken from the previous response.
                url = 'http://222.178.203.72:19005/whst/63/=lzvdhanzbm//comments/hotflow?id={}&mid={}&max_id={}&max_id_type={}'.format(mid, mid, max_id, max_id_type)
            res = self.session.get(url, headers=headers)
            # Decode once and reuse (original re-parsed the body four times).
            page_data = res.json()
            comments_data[num_page] = page_data
            if is_print:
                print(page_data)
            try:
                max_id = page_data['data']['max_id']
                max_id_type = page_data['data']['max_id_type']
            except (KeyError, TypeError):
                # No 'data' section (end of flow, rate-limited, or error page):
                # stop crawling with whatever was collected so far.
                break
            if isinstance(max_page, int):
                if num_page < max_page:
                    time.sleep(self.time_interval)
                else:
                    break
            else:
                # max_page == 'all': a zero max_id marks the last page.
                if int(float(max_id)) != 0:
                    time.sleep(self.time_interval)
                else:
                    break
        if savename is None:
            savename = 'comments_%s.pkl' % str(int(time.time()))
        with open(savename, 'wb') as f:
            pickle.dump(comments_data, f)
        return True

    def login(self, username, password):
        '''Log in with the mobile-web form endpoint.

        Returns True on success; raises RuntimeError when the endpoint
        reports anything other than retcode 20000000.
        '''
        data = {
            'username': username,
            'password': password,
            'savestate': '1',
            'r': 'https://weibo.cn/',
            'ec': '0',
            'pagerefer': 'https://weibo.cn/pub/',
            'entry': 'mweibo',
            'wentry': '',
            'loginfrom': '',
            'client_id': '',
            'code': '',
            'qq': '',
            'mainpageflag': '1',
            'hff': '',
            'hfp': ''
        }
        res = self.session.post(self.login_url, headers=self.login_headers, data=data)
        if res.json()['retcode'] == 20000000:
            self.session.headers.update(self.login_headers)
            print('[INFO]: Account -> %s, login successfully...' % username)
            return True
        else:
            raise RuntimeError('[INFO]: Account -> %s, fail to login, username or password error...' % username)

    def __getMid(self, pc_url):
        '''Extract the mid of a post from its PC-style page.

        Uses a hard-coded guest cookie to fetch the page, then pulls the mid
        out of the embedded 'mblog&act=<digits>\\' marker. Raises IndexError
        when the marker is absent (e.g. deleted post or changed page markup).
        '''
        headers = copy.deepcopy(self.headers)
        headers['Cookie'] = 'SUB=_2AkMrLtDRf8NxqwJRmfgQzWzkZI11ygzEieKdciEKJRMxHRl-yj83qhAHtRB6AK7-PqkF1Dj9vq59_dD6uw4ZKE_AJB3c;'
        res = requests.get(pc_url, headers=headers)
        mid = re.findall(r'mblog&act=(\d+)\\', res.text)[0]
        return mid
if __name__ == '__main__':
    # Command-line entry point: log in and crawl one post's comments.
    parser = argparse.ArgumentParser(description="weibo comments spider")
    parser.add_argument('-u', dest='username', help='weibo username', default='')
    parser.add_argument('-p', dest='password', help='weibo password', default='')
    parser.add_argument('-m', dest='max_page', help='max number of comment pages to crawl(number<int> larger than 0 or all)', default=100)
    parser.add_argument('-l', dest='link', help='weibo comment link', default='')
    parser.add_argument('-t', dest='url_type', help='weibo comment link type(pc or phone)', default='pc')
    args = parser.parse_args()
    wb = weibo()
    username = args.username
    password = args.password
    # BUG FIX: the original did `max_page = int(float(args.max_page))` inside a
    # bare try/except: pass, so passing '-m all' left max_page unbound and the
    # validation below died with a NameError. Pass 'all' through explicitly and
    # only coerce numeric values.
    if args.max_page == 'all':
        max_page = 'all'
    else:
        try:
            max_page = int(float(args.max_page))
        except (TypeError, ValueError):
            raise ValueError('argument error')
    url = args.link
    url_type = args.url_type
    if not username or not password or not max_page or not url or not url_type:
        raise ValueError('argument error')
    wb.login(username, password)
    wb.getComments(url, url_type, max_page)

福尔摩星儿
- 粉丝: 0
- 资源: 229
最新资源
- COMSOL压电相控阵三维聚焦探头(模型编号:74#).pdf
- 85-0614我们的问题-1080P 高清-AVC.mp4
- COMSOL准BIC控制石墨烯临界耦合光吸收的仿真研究.pdf
- COMSOL压电纵波直探头水耦合实验:使用PZT-5A激励1MHz超声并自发自收底面反射波.pdf
- COMSOL凹凸双极板碱性电解水制氢模型:碱性电解槽性能仿真与电极析氢析氧模拟研究——包含图1模型开发器模拟内容对电解质电位及电极析氢界面气液两相流动的瞬态影响研究.pdf
- 88-0617目标函数求解SMO算法可耻的匿了-1080P 高清-AVC.mp4
- 87-0616偏导等于零代入有原式-1080P 高清-AVC.mp4
- 89-0618核函数-1080P 高清-AVC.mp4
- 92-0621软间隔水过-1080P 高清-AVC.mp4
- 90-0619多项式核-1080P 高清-AVC.mp4
- 91-0620高斯核-1080P 高清-AVC.mp4
- 94-0701贝叶斯分类器综述-1080P 高清-AVC.mp4
- 95-0702贝叶斯定理_一个应用-1080P 高清-AVC.mp4
- 93-0622支持向量回归-1080P 高清-AVC.mp4
- 96-0703贝叶斯定理-1080P 高清-AVC.mp4
- 97-0704预热_一个半朴素贝叶斯的例子-1080P 高清-AVC.mp4
资源上传下载、课程学习等过程中有任何疑问或建议,欢迎提出宝贵意见哦~我们会及时处理!
点击此处反馈


