python爬虫学习のurllibライブラリ


urlopen
  • Fetching a page with GET:
    import urllib.request

    response = urllib.request.urlopen("http://www.baidu.com")
    print(response.read().decode('utf-8'))
     
  • Sending a POST request:
    import urllib.request
    import urllib.parse
    data=bytes(urllib.parse.urlencode({'word':'hello'}),encoding='utf-8')
    response = urllib.request.urlopen("http://httpbin.org/post", data=data)  # supplying data makes urlopen send a POST request
    print(response.read())
     
  • Timeout
    import urllib.request
    
    response = urllib.request.urlopen("http://httpbin.org/get",timeout=1)
    print(response.read())
    
     
    If the server does not answer within the timeout, urlopen raises a URLError whose reason is a socket.timeout:
    import socket
    import urllib.request
    import urllib.error
    try:
        response=urllib.request.urlopen('http://httpbin.org/get',timeout=0.1)
    except urllib.error.URLError as e:
        if isinstance(e.reason,socket.timeout):
            print("TIME OUT")
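 Response
  • Response type, status code and response headers:
    A minimal sketch of inspecting the response object returned by urlopen (same URL as above):
    import urllib.request

    response = urllib.request.urlopen("http://www.baidu.com")
    print(type(response))                 # <class 'http.client.HTTPResponse'>
    print(response.status)                # HTTP status code, e.g. 200
    print(response.getheaders())          # all response headers as (name, value) pairs
    print(response.getheader('Server'))   # a single header looked up by name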
     Request
     
    import urllib.request
    
    request = urllib.request.Request('http://www.baidu.com')
    response = urllib.request.urlopen(request)
    print(response.read().decode('utf-8'))
     
  • A headers dict and form data can be passed straight to Request:
    from urllib import request,parse
    url = "http://httpbin.org/post"
    headers = {
        'User-Agent':'Mozilla/4.0(compatible;MSIE ;Windows NT)',
        'Host':'httpbin.org'
    }
    dict = {
        'name':'Germey'
    }
    data  = bytes(parse.urlencode(dict),encoding='utf-8')
    req = request.Request(url=url,data=data,headers=headers,method='POST')
    response = request.urlopen(req)
    print(response.read().decode('utf-8'))

    The same headers can instead be added one at a time with add_header():
    from urllib import request,parse
    url = "http://httpbin.org/post"
    dict = {
        'name':'Germey'
    }
    data  = bytes(parse.urlencode(dict),encoding='utf-8')
    req = request.Request(url=url,data=data,method='POST')
    req.add_header( 'User-Agent','Mozilla/4.0(compatible;MSIE ;Windows NT)')
    response = request.urlopen(req)
    print(response.read().decode('utf-8'))
     
 Handler
  • Proxy:
    import urllib.request
    proxy_handler = urllib.request.ProxyHandler({
        'http': 'http://127.0.0.1:9743',
        'https': 'https://127.0.0.1:9743'  # both http and https requests are routed through the local proxy
    })
    opener = urllib.request.build_opener(proxy_handler)
    response = opener.open("http://www.baidu.com")
    print(response.read())
  • Cookie (a site's cookies can be inspected in the browser DevTools under Application):
    import http.cookiejar,urllib.request
    
    cookie = http.cookiejar.CookieJar()
    handler = urllib.request.HTTPCookieProcessor(cookie)
    opener =urllib.request.build_opener(handler)
    response = opener.open("http://www.baidu.com")
    for item in cookie:
        print(item.name+"="+item.value)
    Saving cookies to a file:
    import http.cookiejar,urllib.request
    filename = 'cookie.txt'
    cookie =http.cookiejar.MozillaCookieJar(filename)
    handler = urllib.request.HTTPCookieProcessor(cookie)
    opener =urllib.request.build_opener(handler)
    response = opener.open("http://www.baidu.com")
    cookie.save(ignore_discard=True,ignore_expires=True)
    Loading the saved cookies back from the file:
    import http.cookiejar,urllib.request
    cookie = http.cookiejar.MozillaCookieJar()
    cookie.load('cookie.txt',ignore_discard=True,ignore_expires=True)
    handler = urllib.request.HTTPCookieProcessor(cookie)
    opener = urllib.request.build_opener(handler)
    response = opener.open("http://www.baidu.com")
    print(response.read().decode('utf-8'))
 Exception handling
  • Catching URLError:
    from urllib import request,error
    try:
        response = request.urlopen("http://www.cuiqingcai.com/index.htm")
    except error.URLError as e:
        print(e.reason)
    A better pattern is to catch the more specific HTTPError first and fall back to URLError:
    from urllib import request,error

    try:
        response = request.urlopen("http://cuiqingcai.com/index.htm")
    except error.HTTPError as e:   # HTTPError is a subclass of URLError, so handle it first
        print(e.reason, e.code, e.headers, sep='\n')
    except error.URLError as e:    # any other failure to reach the URL
        print(e.reason)
    else:
        print("Request Successfully")
    e.reason is not always a string; it can be an exception instance, so checking its type tells you what actually failed (here, a timeout):
    import socket
    import urllib.request
    import urllib.error
    
    try:
        response = urllib.request.urlopen('http://www.baidu.com',timeout=0.01)
    except urllib.error.URLError as e:
        print(type(e.reason))
        if isinstance(e.reason,socket.timeout):
            print("TIME OUT")
     
 URL parsing
  • urlparse
    from urllib.parse import urlparse

    # each call below overwrites result; only the last one is printed
    result = urlparse('http://www.baidu.com/index.html;user?id=5#comment')
    result = urlparse('www.baidu.com/index.html;user?id=5#comment', scheme='https')                 # URL has no scheme, so scheme='https' is applied
    result = urlparse('http://www.baidu.com/index.html;user?id=5#comment', scheme='https')          # the scheme in the URL itself (http) takes priority
    result = urlparse('http://www.baidu.com/index.html;user?id=5#comment', allow_fragments=False)   # '#comment' stays inside the query
    result = urlparse('http://www.baidu.com/index.html#comment', allow_fragments=False)             # with no query, '#comment' stays inside the path
    print(type(result), result)
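    The returned ParseResult is a named tuple, so its parts can be read by attribute or by index; a minimal sketch:
    from urllib.parse import urlparse

    result = urlparse('http://www.baidu.com/index.html;user?id=5#comment')
    print(result.scheme, result[0], result.netloc, result[1], sep='\n')  # attribute and index access give the same values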
    
    
     
  • urlunparse
    from urllib.parse import urlunparse
    
    data = ['http','www.baidu.com','index.html','user','a=6','comment']
    print(urlunparse(data))
     
  • urljoin
    from urllib.parse import urljoin

    print(urljoin('http://www.baidu.com', 'FAQ.html'))  # the base URL supplies the missing scheme and host: http://www.baidu.com/FAQ.html
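    When the second argument is already a complete URL it wins outright; a quick sketch (the target URL is only a placeholder):
    from urllib.parse import urljoin

    # an absolute second argument overrides the base entirely
    print(urljoin('http://www.baidu.com', 'https://cuiqingcai.com/FAQ.html'))  # -> https://cuiqingcai.com/FAQ.html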
    
     
  • urlencode
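    A minimal sketch of serializing a dict into a GET query string (the parameter names are only placeholders):
    from urllib.parse import urlencode

    params = {'name': 'germey', 'age': 22}   # placeholder query parameters
    base_url = 'http://www.baidu.com?'
    url = base_url + urlencode(params)       # -> http://www.baidu.com?name=germey&age=22
    print(url)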