Pythonであるウェブサイトから壁紙用の原寸画像を取得します。
Pythonは本当に便利なものだと言わざるを得ません。三日間ほど勉強すればウェブサイトをスクレイピングできるようになります。
完全コード
# -*- coding: utf-8 -*-
"""
Created on Wed May 26 17:53:13 2021
@author: 19088
"""
import urllib.request
import os
import pickle
import re
import random
import sys
# http
# Proxy-pool manager: crawls public free-proxy listing sites, validates
# each candidate by fetching a test page through it, and persists the
# working set to "agent.pkl" via pickle.
class getHttpAgents:
    def __init__(self):
        # Pool of "ip:port" proxy strings restored from disk (may be empty).
        self.attArray = self.__loadAgentList()
        # When non-empty, every request is forced through this single proxy.
        self.myagent = ""

    # Fetch *url* and return the raw response bytes (None on failure).
    # istry truthy: retry up to 10 times, choosing a random pooled proxy
    # per attempt, and raise ValueError once all 10 fail.
    # istry falsy: exactly one attempt, no exception on failure.
    def openUrl(self, url, istry=1):
        response = ""
        ip = ""
        if 0 != len(self.myagent.strip()):
            ip = self.myagent
        i = 1
        if not istry:
            i = 99  # start near the bound so the loop body runs exactly once
        while i < 100:
            try:
                if 0 == len(self.attArray) and 0 == len(ip.strip()):
                    # No proxy available: direct request with a browser UA.
                    req = urllib.request.Request(url)
                    req.add_header("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36")
                    response = urllib.request.urlopen(req)
                else:
                    if 0 != len(self.attArray):
                        ip = random.choice(self.attArray)
                    if 0 != len(self.myagent.strip()):
                        ip = self.myagent  # a forced proxy overrides the pool
                    print(" {} {}".format(ip, url))
                    proxy = {"http": ip}
                    proxy_support = urllib.request.ProxyHandler(proxy)
                    opener = urllib.request.build_opener(proxy_support)
                    opener.addheaders = [("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36")]
                    response = opener.open(url)
            except Exception:  # was a bare except; keep the best-effort retry
                if not istry:
                    print("{} ".format(ip))
                else:
                    print(" {} !".format(i))
            else:
                break
            finally:
                i += 1
                if 11 == i and istry:
                    # 10 attempts exhausted.
                    raise ValueError
        if not response:
            return
        html = response.read()
        return html

    # Re-validate every proxy currently in the pool; persist and keep
    # only the ones that still work, then clear the forced proxy.
    def checkMyIpPool(self):
        agentsResult = []
        agentList = self.attArray
        for candidate in agentList:
            ip = candidate
            self.setMyIp(ip)
            alive = self.__getMyIp()
            if not alive:
                pass  # dead proxy: simply not carried over
            else:
                agentsResult.append(ip)
        self.__writeAgentList(agentsResult)
        self.__setAgents(agentsResult)
        self.setMyIp("")

    # Parse "<td>ip</td>" / "<td>port</td>" cells out of a listing page,
    # validate each "ip:port" candidate and merge the working ones into
    # the pool.  Returns the raw candidate list (None when parsing fails
    # or nothing survives).
    def getAgents(self, html):
        pattern = re.compile(r'(<td>)\s*((25[0-5]|2[0-4]\d|[0-1]\d\d|\d\d|\d)\.){3}(25[0-5]|2[0-4]\d|[0-1]\d\d|\d\d|\d)\s*</td>')
        ipList = []
        for ipiter in pattern.finditer(html):
            ipText = ipiter.group()
            ipGroup = re.search(r"((25[0-5]|2[0-4]\d|[0-1]\d\d|\d\d|\d)\.){3}(25[0-5]|2[0-4]\d|[0-1]\d\d|\d\d|\d)", ipText)
            ipList.append(ipGroup.group())
        portList = []
        pattern = re.compile(r'(<td>)\s*\d+\s*</td>')
        for portiter in pattern.finditer(html):
            portText = portiter.group()
            portGroup = re.search(r"\d+", portText)
            portList.append(portGroup.group())
        # BUG FIX: was "is not" (identity comparison on ints), now "!=".
        if len(ipList) != len(portList):
            print(" : ip !")
            return
        ipDict = dict(zip(ipList, portList))
        agentList = []
        for key in ipDict:
            agentList.append(key + ":" + ipDict.get(key))
        agentsResult = []
        for candidate in agentList:
            ip = candidate
            self.setMyIp(ip)
            alive = self.__getMyIp()
            if not alive:
                pass  # unreachable proxy, skip it
            else:
                agentsResult.append(ip)
                self.__setAgents(agentsResult)
                print("{} ".format(ip))
        agentsResult.extend(self.attArray)
        if 0 == len(agentsResult):
            return
        self.__writeAgentList(agentsResult)
        self.__setAgents(agentsResult)
        self.setMyIp("")
        return agentList

    # Replace the in-memory proxy pool.
    def __setAgents(self, ipArray):
        self.attArray = ipArray

    # Force (or clear, with "") the proxy used by openUrl.
    def setMyIp(self, ip):
        self.myagent = ip

    # Persist the proxy list, overwriting any previous agent.pkl.
    def __writeAgentList(self, agentList):
        if os.path.exists("agent.pkl"):
            os.remove("agent.pkl")
        # BUG FIX: the filename was "agent.pkl." (trailing dot), so the
        # file written here was never found again by __loadAgentList().
        with open("agent.pkl", "wb") as f:
            pickle.dump(agentList, f)
        print(" {} ".format(len(agentList)))

    # Load the persisted proxy list; returns [] when no file exists.
    def __loadAgentList(self):
        agentlist = []
        if not os.path.exists("agent.pkl"):
            return agentlist
        with open("agent.pkl", "rb") as f:
            agentlist = pickle.load(f)
        print(" {} ".format(len(agentlist)))
        return agentlist

    # Probe the currently forced proxy by fetching a test page through
    # it.  Returns the page HTML on success, None on failure.
    # (The *ip* parameter is unused; kept for interface compatibility.)
    def __getMyIp(self, ip=""):
        url = "https://www.baidu.com/"
        html = ""
        try:
            # openUrl returns None on failure, so .decode raises -> caught.
            html = self.openUrl(url, 0).decode("utf-8")
        except Exception:  # was a bare except
            return
        else:
            return html

    # Crawl several public free-proxy sites, up to *index* pages per
    # paginated site, feeding each page's HTML into getAgents().
    def crawlingAgents(self, index):
        try:
            url = "http://ip.yqie.com/ipproxy.htm"
            print(url)
            html = self.openUrl(url)
            html = html.decode("utf-8")
            self.setMyIp("")  # validate candidates without a forced proxy
            self.getAgents(html)
        except Exception as e:
            print("{} ".format(url))
        page = index
        indexCur = 1
        while indexCur <= page:
            try:
                url = r"https://www.89ip.cn/index_{}.html".format(indexCur)
                print(url)
                self.setMyIp("")
                html = self.openUrl(url)
                html = html.decode("utf-8")
                self.getAgents(html)
            except Exception as e:
                print("{} ".format(url))
            finally:
                indexCur += 1
        indexCur = 1
        while indexCur <= page:
            try:
                url = r"http://www.66ip.cn/{}.html".format(indexCur)
                print(url)
                self.setMyIp("")
                # BUG FIX: was "a.openUrl" (undefined global "a"), whose
                # NameError was silently swallowed by the except below.
                html = self.openUrl(url)
                html = html.decode("gb2312")
                self.getAgents(html)
            except Exception as e:
                print("{} ".format(url))
            finally:
                indexCur += 1
        indexCur = 1
        while indexCur <= page:
            try:
                url = r"http://www.ip3366.net/?stype=1&page={}".format(indexCur)
                print(url)
                self.setMyIp("")
                html = self.openUrl(url)  # BUG FIX: was "a.openUrl"
                html = html.decode("gb2312")
                self.getAgents(html)
            except Exception as e:
                print("{} ".format(url))
            finally:
                indexCur += 1
        indexCur = 1
        while indexCur <= page:
            try:
                url = r"http://www.kxdaili.com/dailiip/1/{}.html".format(indexCur)
                print(url)
                self.setMyIp("")
                html = self.openUrl(url)  # BUG FIX: was "a.openUrl"
                html = html.decode("utf-8")
                self.getAgents(html)
            except Exception as e:
                print("{} ".format(url))
            finally:
                indexCur += 1
#
# Downloads wallpaper images from www.ivsky.com into the current working
# directory, fetching pages through the getHttpAgents proxy helper.
class downLoadPictures:
    # Set up parser caches and the shared HTTP fetcher.
    def __init__(self):
        self.sortKey={} # reserved; never written elsewhere in this class
        self.urlLoad=getHttpAgents() # HTTP fetcher with optional proxy support
        self.bzmenuDict={} # top navigation menu links: name -> href
        self.sortscreenDict={} # resolution/category filter links: name -> href
        self.littleSignDict={} # tag-strip ("sline") links: name -> href
        pass
    # Download every picture reachable from one listing page *url*:
    # listing page -> album pages -> picture pages -> saved .jpg files.
    def getPictures(self,url):
        # Fetch the listing page (raw bytes, or None on failure).
        pagerHtml=self.urlLoad.openUrl(url)
        # Album hrefs parsed from the listing page.
        # NOTE(review): readPages may return None on parse failure, which
        # would raise AttributeError on .values() here -- confirm intended.
        folderPictursUrl=self.readPages(pagerHtml).values()
        if not folderPictursUrl:
            print(" !")
            return
        for floderiterUrl in folderPictursUrl:
            # hrefs are site-relative; prepend the site root.
            folderUrl=str("https://www.ivsky.com/")+floderiterUrl
            folderHtml=self.urlLoad.openUrl(folderUrl)
            # Map of picture name -> relative picture-page URL.
            pictursUrlDict=self.readFolders(folderHtml)
            for iterPictureKey in pictursUrlDict:
                fileName=iterPictureKey+".jpg"
                pictureUrl=str("https://www.ivsky.com/")+pictursUrlDict.get(iterPictureKey)
                # Picture page -> direct download URL -> image bytes.
                pictureHtml=self.urlLoad.openUrl(pictureUrl)
                picturDownUrl=self.readPictures(pictureHtml)
                pictureDownHtml=self.urlLoad.openUrl(picturDownUrl)
                if not pictureDownHtml:
                    continue
                # Save raw image bytes as <name>.jpg in the current directory.
                with open(fileName,"wb+") as f:
                    f.write(pictureDownHtml)
    # Extract {link name: href} pairs from the anchors in *html*.
    # isPicture=1 switches to the "<p><a ...</p>" thumbnail layout;
    # isFolder=1 appends "_<index>" to names so duplicates stay distinct.
    def getHrefMap(self,html,isPicture=0,isFolder=0):
        hrefDict={}
        pattern=re.compile(r'<a\s*.*?\s*</a>',re.I)
        if isPicture:
            pattern=re.compile(r'<p>\s*?<a\s*.*?</p>',re.I)
        hrefIter=pattern.finditer(html)
        index=0
        for iter in hrefIter:
            hrefText=iter.group()
            # Visible link text, matched as '">...text...</a>'.
            pattern=re.compile(r'"\s*?>\s*?.*?</a>',re.I)
            name=""
            nameGroup=pattern.search(hrefText)
            if nameGroup:
                name=nameGroup.group()
                # A 5-char (space-stripped) match means (near-)empty link
                # text; fall back to the title="..." attribute instead.
                if(5==len(nameGroup.group().replace(" ", ""))):
                    pattern=re.compile(r'title=".*?"',re.I)
                    nameGroup=pattern.search(hrefText)
                    if nameGroup:
                        name=nameGroup.group()[7:-1] # strip 'title="' and trailing '"'
                # Strip the leading '">' and trailing '</a>' markers.
                # NOTE(review): this slice also runs after the title
                # fallback above and would truncate that name -- verify.
                name=name[2:-4].replace(" ", '')
            # href value of the anchor.
            # NOTE(review): the ' rel="external nofollow" ' fragment looks
            # like an artifact of the blog page this code was copied from;
            # the target site's anchors may not contain it -- verify.
            pattern=re.compile(r'href=".*?" rel="external nofollow" ',re.I)
            url=""
            urlGroup=pattern.search(hrefText)
            if urlGroup:
                url=urlGroup.group()[6:-1].replace(" ", '') # strip 'href="' and trailing '"'
            if isFolder:
                index+=1
                name+="_"+str(index)
            hrefDict[name]=url
        return hrefDict
    # Parse a listing page: cache the navigation/filter/tag link maps on
    # self and return {album name: album href} (None when any expected
    # section is missing from the page).
    def readPages(self,html):
        html=html.decode("utf-8")
        # Top navigation menu ("bzmenu").
        pattern=re.compile(r'<ul\s*class="bzmenu".*?</ul>',re.I)
        sortClassGroup=pattern.search(html)
        if sortClassGroup:
            sortMessage=sortClassGroup.group()
            self.bzmenuDict=self.getHrefMap(sortMessage)
        else:
            print(" !")
            return
        # Resolution/category filter list ("sall_dd").
        pattern=re.compile(r'<ul\s*class="sall_dd".*?</ul>',re.I)
        sortClassGroup=pattern.search(html)
        if sortClassGroup:
            sortMessage=sortClassGroup.group()
            self.sortscreenDict=self.getHrefMap(sortMessage)
        else:
            print(" !")
            return
        # Tag strip ("sline").
        pattern=re.compile(r'<div\s*class="sline".*?</div>',re.I)
        sortClassGroup=pattern.search(html)
        if sortClassGroup:
            sortMessage=sortClassGroup.group()
            self.littleSignDict=self.getHrefMap(sortMessage)
        else:
            print(" ")
            return
        pictureDict={}
        # Album thumbnail list ("ali") -- the value actually returned.
        pattern=re.compile(r'<ul\s*class="ali".*?</ul>',re.I)
        sortClassGroup=pattern.search(html)
        if sortClassGroup:
            sortMessage=sortClassGroup.group()
            pictureDict=self.getHrefMap(sortMessage,1)
        else:
            print(" !")
            return
        return pictureDict
    # Parse an album page: return {picture name_index: picture-page href}
    # from the "pli" list (None when the list is missing or html is None).
    def readFolders(self,html):
        if not html:
            return
        html=html.decode("utf-8")
        pattern=re.compile(r'<ul\s*class="pli".*?</ul>',re.I)
        sortClassGroup=pattern.search(html)
        pictureUrlDict={}
        if sortClassGroup:
            sortMessage=sortClassGroup.group()
            pictureUrlDict=self.getHrefMap(sortMessage,1,1)
        else:
            print(" ")
            return
        return pictureUrlDict
    # Parse a picture page and return the absolute full-size image URL
    # derived from the preview src attribute (None when the "pic" block
    # is missing or html is None).
    def readPictures(self,html):
        if not html:
            return
        html=html.decode("utf-8")
        pattern=re.compile(r'<div\s*class="pic".*?</div>',re.I)
        sortClassGroup=pattern.search(html)
        pictureUrl="" # NOTE(review): never used; pictureUrlDict is returned instead
        if sortClassGroup:
            sortMessage=sortClassGroup.group()
            pattern=re.compile(u"src='.*?'",re.I)
            url=""
            urlGroup=pattern.search(sortMessage)
            if urlGroup:
                url=urlGroup.group()[5:-1].replace(" ", '') # strip "src='" and trailing "'"
                # Rewrite the preview path into the download path.
                url=url.replace('img-pre', 'img-picdown')
                url=url.replace('pre', 'pic')
                url=str("https:")+url # src is protocol-relative ("//...")
            pictureUrlDict=url
        else:
            print(" ")
            return
        return pictureUrlDict
# Console-facing facade: wires the proxy manager and the picture
# downloader behind three interactive commands.
class UrlUser:
    def __init__(self):
        self.agent = getHttpAgents()           # proxy-pool manager
        self.downPicture = downLoadPictures()  # wallpaper scraper

    # Ask for a save directory and a page count, then download listing
    # pages 1..N of the 1920x1080 wallpaper index.
    def downPictures(self):
        dirPath = input("Save directory: ")
        if not os.path.exists(dirPath):
            os.mkdir(dirPath)
        if not os.path.isdir(dirPath):
            print("savePath is wrong!")
            sys.exit()
        os.chdir(dirPath)  # downloaded files are written relative to cwd
        # BUG FIX: the prompt string literal was broken across two source
        # lines (a SyntaxError); reconstructed as one literal with "\n".
        page = input("How many pages to download?\n")
        indexRe = re.search(r"\d+", page)
        if not indexRe:
            print("Invalid page count!")
            # BUG FIX: previously fell through and crashed on None.group();
            # now bail out like downAgents does.
            return
        indexRe = int(indexRe.group())
        indexCur = 1
        while indexCur <= indexRe:
            try:
                url = r"https://www.ivsky.com/bizhi/nvxing_1920x1080/index_{}.html".format(indexCur)
                print(url)
                self.downPicture.getPictures(url)
            except Exception:  # was a bare except; keep best-effort per page
                print("Page failed!")
            finally:
                indexCur += 1

    # Ask for a page count and crawl that many pages of each proxy site.
    def downAgents(self):
        # BUG FIX: prompt literal was broken across two lines (SyntaxError).
        page = input("How many proxy pages to crawl?\n")
        indexRe = re.search(r"\d+", page)
        if not indexRe:
            print("Invalid page count!")
            return
        indexRe = int(indexRe.group())
        self.agent.crawlingAgents(indexRe)

    # Re-validate every proxy currently in the pool.
    def checkPool(self):
        self.agent.checkMyIpPool()
# Interactive entry point: show a small menu and dispatch to the chosen
# UrlUser command.
# BUG FIX: the menu/prompt string literals were broken across source
# lines (SyntaxErrors); reconstructed as single literals.
if __name__ == "__main__":
    print("*" * 20)
    print("1. crawl proxy IPs")
    print("2. validate the proxy pool")
    print("3. download pictures")
    print("*" * 20)
    mode = input("Choose a mode:\n")
    indexRe = re.search(r"\d+", mode)
    if not indexRe:
        print("Invalid choice!")
        sys.exit()
    indexRe = int(indexRe.group())
    userObj = UrlUser()  # was "uesrObj" (typo); local rename only
    if 1 == indexRe:
        userObj.downAgents()
    elif 2 == indexRe:
        userObj.checkPool()
    elif 3 == indexRe:
        userObj.downPictures()
    else:
        print("Invalid choice!")
        sys.exit()
    print("Done!")
効果図