requests入門：簡単クローラー（三）
HTTPはステートレスなプロトコルであり、各リクエストは互いに独立しています。では、ログインが必要なページへ毎回アクセスするとき、クローラーはこの問題をどのように解決すればよいのでしょうか？
# Method 1: copy the Cookie from an already-logged-in browser and send it
# with every request via the headers dict.
# NOTE(review): `cookies` is presumably a dict like {'Cookie': '...'} copied
# from the browser — confirm; a bare cookie string would not be valid headers.
headers = cookies
response = requests.get(url, headers=headers)
import requests
from fake_useragent import UserAgent

# Build a default header dict with a randomly chosen User-Agent string so
# requests look like they come from a real browser.
ua = UserAgent()
headers = {
'User-Agent': ua.random
}
# Script 1, step 1: log in once so the saved page's Cookie can be copied
# out by hand for later requests.
def renren_login(user, pwd):
    """POST *user*/*pwd* to renren's login endpoint and dump the HTML reply."""
    login_url = 'http://www.renren.com/PLogin.do'
    payload = {'email': user, 'password': pwd}
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'
    }
    reply = requests.post(login_url, data=payload, headers=request_headers)
    # Save the response so the login result (and its cookies) can be inspected.
    with open('renren_login.html', 'w', encoding='utf-8') as fp:
        fp.write(reply.text)
# Script 1, step 2: request a logged-in page by sending a hand-copied
# Cookie header (fill in your own cookie value).
def ren_index(url):
    """Fetch *url* with a manually supplied Cookie header and save the HTML."""
    cookie_headers = {
        'Cookie': 'zi ji bu quan',
    }
    # NOTE(review): POST is used although a plain page fetch usually wants
    # GET — kept as-is to preserve the original behavior.
    reply = requests.post(url, headers=cookie_headers)
    html = reply.content.decode('utf-8')
    with open('ren_index.html', 'w', encoding='utf-8') as out:
        out.write(html)
    print(reply.status_code)
if __name__ == '__main__':
    # Step 1 (run once, then copy the Cookie out of the saved login page):
    # renren_login(user='', pwd='')
    # Step 2: fetch a page that requires login, using the copied Cookie.
    url = 'http://status.renren.com/status/v7/972262212'
    # Bug fix: the original called `renren_spider(url)`, which is not defined
    # anywhere in this script (NameError); the fetcher defined above is
    # `ren_index`.
    ren_index(url)
第二の方法は、sessionオブジェクトを作成する方法です。まずsessionオブジェクトでログインを要求し、その後も同じsessionオブジェクトでアクセスすれば、cookieは自動的に引き継がれます。
import requests
from fake_useragent import UserAgent

# Random User-Agent for every run.
ua = UserAgent()
# Bug fix: requests expects `headers` to be a dict mapping header names to
# values; the original assigned the bare UA string (`headers = ua.random`),
# which would fail when passed as session.post(..., headers=headers).
headers = {'User-Agent': ua.random}
# Script 2, step 1: log in through the shared session so the cookies it
# receives are reused automatically by later requests on that session.
def ren_login(form_data):
    """POST *form_data* (email/password) to the login endpoint and dump the HTML."""
    login_url = 'http://www.renren.com/PLogin.do'
    # The module-level `session` stores the login cookies for later calls.
    reply = session.post(login_url, data=form_data, headers=headers)
    with open('ren_login.html', 'w', encoding='utf-8') as out:
        out.write(reply.text)
    print(reply.status_code)
# Script 2, step 2: fetch a logged-in page; the shared session already
# carries the login cookies, and an extra Cookie header may be supplied too.
def ren_index(url):
    """Fetch *url* through the shared session and save the decoded HTML."""
    extra_headers = {
        'Cookie': 'zi ji bu quan',
    }
    # Same session as ren_login, so its cookies ride along automatically.
    reply = session.post(url, headers=extra_headers)
    page = reply.content.decode('utf-8')
    with open('ren_index.html', 'w', encoding='utf-8') as out:
        out.write(page)
    print(reply.status_code)
# 1. Create one shared Session; every request made through it shares cookies.
# `requests.Session()` is the documented constructor; the lowercase
# `requests.session()` used originally is a deprecated alias for it.
session = requests.Session()
if __name__ == '__main__':
    # Collect credentials, log in on the shared session, then fetch a page
    # that requires login.
    form_data = {}
    form_data['email'] = input(" ")
    form_data['password'] = input(" ")
    # Bug fix: the original never called ren_login, so the session carried no
    # login cookies when ren_index ran.
    ren_login(form_data)
    url = 'http://status.renren.com/status/v7/972262212'
    ren_index(url)