How can we restrict what gets scraped via tagName in Selenium?


I have the following code to scrape questions from Stack Overflow. I scraped the site using the tagName locator, but it scrapes everything inside every matching tag, including content I don't want. How can I restrict it?

For example, if I only want the questions that sit in h1 tags, the scraper also picks up the Stack Overflow site name, because that is inside an h1 tag as well. How do I make it scrape only the questions inside a specific tag?
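To make the problem concrete, here is a minimal sketch, assuming a local chromedriver and the `#questions` container id referenced in the answer below: a tag-name search on the driver matches every h1 on the page, while the same search scoped to a parent WebElement only looks inside that element.

from selenium import webdriver

driver = webdriver.Chrome()
driver.get("https://stackoverflow.com/questions")

# Page-wide search: matches EVERY h1 on the page, including the site header.
all_h1 = driver.find_elements_by_tag_name("h1")

# Scoped search: only looks inside the element with id="questions",
# so anything outside that container (e.g. the header) is excluded.
# How many h1 elements this finds depends on the page's current markup.
questions_container = driver.find_element_by_id("questions")
scoped_h1 = questions_container.find_elements_by_tag_name("h1")

print(len(all_h1), len(scoped_h1))
driver.quit()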

Here is my code; it scrapes everything inside the tags. How can I restrict it?

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import pandas as pd
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import csv

def to_do():
    # vars...
    csv_file_location = r"C:\Users\intel\Desktop\data_file.csv"

    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) ' \
                 'Chrome/80.0.3987.132 Safari/537.36'

    driver_exe = 'chromedriver'
    options = Options()
    options.add_argument("--headless")
    options.add_argument(f'user-agent={user_agent}')
    options.add_argument("--disable-web-security")
    options.add_argument("--allow-running-insecure-content")
    options.add_argument("--allow-cross-origin-auth-prompt")

    url = "https://stackoverflow.com/questions"

    driver = webdriver.Chrome(executable_path=r"C:\Users\intel\Downloads\setups\chromedriver.exe", options=options)
    driver.get(url)

    one_ = "A"

    two_ = "DIV"

    three_ = "A"

    try:
        element = WebDriverWait(driver, 5).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, one_))
        )
        elements_1 = driver.find_elements_by_css_selector(one_)

        web_content_list = []
        for ele in elements_1:
            web_content_dict = {}
            web_content_dict["Title"] = ele.text
            web_content_list.append(web_content_dict)

        element = WebDriverWait(driver, 5).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, two_))
        )
        elements_2 = driver.find_elements_by_css_selector(two_)

        for ele2 in elements_2:
            web_content_dict = {}
            web_content_dict["Title2"] = ele2.text
            web_content_list.append(web_content_dict)

        element = WebDriverWait(driver, 5).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, three_))
        )

        elements_3 = driver.find_elements_by_css_selector(three_)

        for ele3 in elements_3:
            web_content_dict = {}
            web_content_dict["Title3"] = ele3.text
            web_content_list.append(web_content_dict)

        df = pd.DataFrame(web_content_list)
        new_df = pd.DataFrame({'Column 1': df['Title'].dropna(),
                  'Column 2': df['Title2'].dropna(),
                  'Column 3': df['Title3'].dropna()})
        new_df.to_csv(csv_file_location,
                  index=False, mode='a', encoding='utf-8')

        try:
            f = open(csv_file_location)
            print("Done !!!\n"*3)

        except IOError:
            print("File not accessible")

        finally:
            f.close()
        driver.quit()

    except:
        element = WebDriverWait(driver, 5).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, one_))
        )
        elements_1 = driver.find_elements_by_css_selector(one_)

        element = WebDriverWait(driver, 5).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, two_))
        )

        elements_2 = driver.find_elements_by_css_selector(two_)

        element = WebDriverWait(driver, 5).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, three_))
        )

        elements_3 = driver.find_elements_by_css_selector(three_)

        df = pd.DataFrame({
            "Title1" : [ele for ele.text in elements_1],
            "Title2" : [ele2 for ele2.text in elements_2],
            "Title3" : [ele3 for ele3.text in elements_3],
        })
        df.to_csv(csv_file_location,
                  index=False, mode='w', encoding='utf-8')

        try:
            f = open(csv_file_location)
            print("Done !!!\n"*3)
            # Do something with the file
        except IOError:
            print("File not accessible")

        finally:
            f.close()
        driver.quit()

    finally:
        print("start")

if __name__ == "__main__":
    to_do()

Any help would be appreciated...

python python-3.x selenium web-scraping getelementsbytagname
1 Answer

If you only want the a tags inside the questions, you can use this CSS selector:

selector = '#questions a'

element = WebDriverWait(driver, 5).until(
    EC.presence_of_element_located((By.CSS_SELECTOR, selector))
)
elements_1 = driver.find_elements_by_css_selector(selector)

This CSS selector finds the element with the id 'questions' and every anchor ('a') underneath it, and uses only those.
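Putting it together, a minimal usage sketch along these lines could collect just the question titles. The 'a.question-hyperlink' class below is an assumption about Stack Overflow's markup for question title links and may change, so check the page source before relying on it:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
driver.get("https://stackoverflow.com/questions")

# Scoping the selector under #questions keeps the site header out of the results.
# 'a.question-hyperlink' is assumed to be the class on each question title link.
selector = "#questions a.question-hyperlink"

WebDriverWait(driver, 5).until(
    EC.presence_of_element_located((By.CSS_SELECTOR, selector))
)
titles = [el.text for el in driver.find_elements_by_css_selector(selector)]
print(titles)
driver.quit()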
