授業時間15第3節練習項目:賃貸情報を取得する
4868 ワード
成果
コード
from bs4 import BeautifulSoup
import requests # s
import time
# Output file for per-listing scraped details.
# NOTE(review): "resut" looks like a typo of "result" — kept as-is so any
# existing output files still match.
path_detail ='./resut_detail.txt'
# Output file for the detail-page links collected from the listing pages.
path_links ='./resut_links.txt'
# Browser-like request headers: a desktop Chrome User-Agent plus a session
# Cookie so the site serves the same pages a real browser session would.
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36',
'Cookie':'abtest_ABTest4SearchDate=b; OZ_1U_2282=vid=v7f3c69fed80eb.0&ctime=1475593887<ime=0; OZ_1Y_2282=erefer=-&eurl=http%3A//gz.xiaozhu.com/fangzi/2303611027.html&etime=1475593887&ctime=1475593887<ime=0&compid=2282; _ga=GA1.2.1488476801.1475593889; gr_user_id=13bbe192-e386-4074-8ca0-a4a882ba66aa; gr_session_id_59a81cc7d8c04307ba183d331c373ef6=8d7a3db1-e35f-4f23-9ce3-e73afd78b45a; __utma=29082403.1488476801.1475593889.1475594056.1475594056.1; __utmb=29082403.1.10.1475594056; __utmc=29082403; __utmz=29082403.1475594056.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)'
}
def get_detail(url_detail='http://gz.xiaozhu.com/fangzi/2303611027.html'):
    """Scrape one xiaozhu.com rental-listing page and append its details.

    Extracts title, address, nightly rental, main image, and landlord
    photo/gender/name, appends one record (a stringified list of values)
    per line to ``path_detail``, and prints each record.

    Parameters
    ----------
    url_detail : str
        URL of a single listing page.
    """
    time.sleep(15)  # throttle so we don't hammer the site
    # Fetch the page exactly once (the original fetched it twice and
    # discarded the first response) and pass the browser-like headers,
    # which were defined at module level but never actually sent.
    web_content = requests.get(url_detail, headers=headers)
    print(web_content.status_code)
    soup = BeautifulSoup(web_content.text, 'lxml')
    titles = soup.select('div.pho_info h4 em')
    addresses = soup.select('body > div.wrap.clearfix.con_bg > div.con_l > div.pho_info > p')
    rentals = soup.select('div.day_l')
    images = soup.select('img#curBigImage')  # main photo, matched by id
    landlord_photos = soup.select('div.member_pic > a > img')
    landlord_genders = soup.select('#floatRightBox > div.js_box.clearfix > div.member_pic > div')
    landlord_names = soup.select('#floatRightBox > div.js_box.clearfix > div.w_240 > h6 > a')
    for title, address, rental, image, landlord_photo, landlord_gender, landlord_name in zip(
            titles, addresses, rentals, images, landlord_photos,
            landlord_genders, landlord_names):
        # Gender is encoded in the CSS class of the icon <div>
        # ('member_ico' vs 'member_ico1').  Inspect the class list
        # directly instead of string-comparing str(list), which is
        # fragile against attribute ordering/extra classes.  The original
        # assigned the same ' ' placeholder in all three branches, making
        # the field useless.
        # NOTE(review): the original labels were lost in extraction —
        # confirm which class maps to which gender against a live page.
        icon_classes = landlord_gender.get('class') or []
        if 'member_ico' in icon_classes:
            gender = 'male'
        elif 'member_ico1' in icon_classes:
            gender = 'female'
        else:
            gender = 'unknown'
        data = {
            'title': title.get_text(),
            'address': address.get('title'),
            'rental': rental.get_text(),
            'image': image.get('src'),
            'landlord_photo': landlord_photo.get('src'),
            'landlord_gender': gender,
            'landlord_name': landlord_name.get_text()
        }
        # Append one record per line.  The original source had a literal
        # newline inside the quotes (a broken '\n' escape), which is a
        # syntax error as written.
        with open(path_detail, 'a+') as text:
            text.write(str(list(data.values())) + '\n')
        print(data)
# Manual smoke test of a single listing page (left disabled).
#get_detail()
# Listing pages to crawl; range(1, 2) yields page 1 only — widen the range
# to scrape more pages.
url_list = ['http://gz.xiaozhu.com/tianhe-duanzufang-p{}-8/'.format(i) for i in range(1,2)]
def get_moreurls():
    """Crawl every listing page in ``url_list``.

    For each listing page, collects the per-listing detail links, appends
    each link (one per line) to ``path_links``, and scrapes the linked
    page via :func:`get_detail`.
    """
    with open(path_links, 'a+') as text:
        for link in url_list:
            time.sleep(2)  # polite delay between listing pages
            # Pass the browser-like headers (omitted in the original).
            web_content = requests.get(link, headers=headers)
            soup = BeautifulSoup(web_content.text, 'lxml')
            link_lists = soup.select('#page_list ul.pic_list.clearfix li a.resule_img_a')
            for detail_link in link_lists:
                href = str(detail_link.get('href'))
                print(href)
                # One link per line.  The original had a literal newline
                # inside the quotes (a broken '\n' escape).
                text.write(href + '\n')
                get_detail(url_detail=href)

get_moreurls()