[Web Crawler King] 10. Automated parameter setting and search


from bs4 import BeautifulSoup
from selenium import webdriver
import time


while True:
    try:
        site = input("Enter the site to search (naver / daum / google)\n")
        # Accept either the English name or the Korean alias for each portal
        if site.find("naver") != -1 or site.find("네이버") != -1:
            site = "https://naver.com"
            break
        elif site.find("daum") != -1 or site.find("다음") != -1:
            site = "https://daum.net"
            break
        elif site.find("google") != -1 or site.find("구글") != -1:
            site = "https://www.google.co.kr"
            break
        else:
            print("That site is not supported.")
    except Exception:
        print("An error occurred.")
        
try:
    query_txt = input('What keyword do you want to crawl?\n')

    # Launch a Chrome browser through chromedriver
    path = r"c:\py_temp\chromedriver_win32\chromedriver.exe"
    driver = webdriver.Chrome(path)
    driver.get(site)

    time.sleep(2)  # wait 2 seconds for the page to load

    # Locate each site's search box and type the keyword into it
    if site == "https://naver.com":
        element = driver.find_element_by_id("query")
        element.send_keys(query_txt)
        driver.find_element_by_id("search_btn").click()

    elif site == "https://daum.net" or site == "https://google.co.kr":

        element = driver.find_element_by_id("q")
        element.send_keys(query_txt)
        element.send_keys("\n")
        
    elif site == "https://www.google.co.kr":

        element = driver.find_element_by_name("q")
        element.send_keys(query_txt)
        element.send_keys("\n")

except Exception:
    print("An error occurred.")
The locator for the search element differs from site to site:
find_element_by_name('html_name')
find_element_by_id('html_id')
find_element_by_xpath('/html/body/some/xpath')
find_element_by_css_selector('#css > div.selector')
find_element_by_class_name('some_class_name')
find_element_by_tag_name('h1')
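Note that the find_element_by_* methods above belong to the Selenium 3 API and were removed in Selenium 4. A minimal sketch of the equivalent Selenium 4 calls, assuming the same webdriver, path, and driver as in the code above:

from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service

# Selenium 4 wraps the chromedriver path in a Service object
driver = webdriver.Chrome(service=Service(path))

# One generic find_element() call plus a By locator replaces each old helper
element = driver.find_element(By.ID, "query")                           # find_element_by_id
element = driver.find_element(By.NAME, "q")                             # find_element_by_name
element = driver.find_element(By.XPATH, "/html/body/some/xpath")        # find_element_by_xpath
element = driver.find_element(By.CSS_SELECTOR, "#css > div.selector")   # find_element_by_css_selector
element = driver.find_element(By.CLASS_NAME, "some_class_name")         # find_element_by_class_name
element = driver.find_element(By.TAG_NAME, "h1")                        # find_element_by_tag_name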