from selenium import webdriver
from selenium.webdriver.common.by import By
from lxml import etree
import csv
import time

# Launch Chrome and open Douyu's "all rooms" directory page
driver = webdriver.Chrome()
driver.get("https://www.douyu.com/directory/all")


class DouyuSpider:
    def __init__(self):
        self.n = 0     # total number of rooms written so far
        self.page = 1  # current page counter

    # Scrape the room name and viewer count of every stream on the current page
    def getData(self):
        # Feed the rendered page source to lxml so it can be queried with XPath
        parseHtml = etree.HTML(driver.page_source)
        names = parseHtml.xpath('//div[@id="live-list-content"]//span[@class="dy-name ellipsis fl"]/text()')
        numbers = parseHtml.xpath('//div[@id="live-list-content"]//span[@class="dy-num fr"]/text()')
        # names   : ["room 1", "room 2", ...]
        # numbers : ["90.8万", "90万", ...]  (viewer counts)
        # zip(L1, L2) pairs the two lists: [(1, "A"), (2, "B"), (3, "C")]
        for name, number in zip(names, numbers):
            L = [name.strip(), number.strip()]
            self.writeData(L)
            self.n += 1

    # Append one row to the output CSV file
    def writeData(self, L):
        with open("douyu.csv", "a", newline="", encoding="gb18030") as f:
            writer = csv.writer(f)
            writer.writerow(L)

    # Crawl up to 10 pages, clicking "next" until the button is disabled
    def workOn(self):
        for i in range(1, 11):
            self.getData()
            print("Page %d done" % i)
            # On the last page the "next" button carries the disabled classes
            if driver.page_source.find("shark-pager-next shark-pager-disable shark-pager-disable-next") == -1:
                driver.find_element(By.CLASS_NAME, "shark-pager-next").click()
                time.sleep(2)  # give the next page a moment to load before parsing
            else:
                print("No more pages")
                break
        print("Scraped %d rooms in total" % self.n)


if __name__ == "__main__":
    spider = DouyuSpider()
    spider.workOn()
    driver.quit()  # close the browser when the crawl finishes