Scraping a website into a CSV file

import csv
import requests
from bs4 import BeautifulSoup

page = requests.get("https://www.cbssports.com/nba/stats/playersort/nba/year-2019-season-preseason-category-scoringpergame")
soup = BeautifulSoup(page.content, 'html.parser')

for record in soup.find_all('tr'):
    try:
        print(record.contents[0].text)
        print(record.contents[6].text)
        print(record.contents[7].text)
        print(record.contents[8].text)
        print(record.contents[9].text)
        print(record.contents[10].text)
        print(record.contents[12].text)
        print(record.contents[13].text)
        print(record.contents[14].text)
        print(record.contents[15].text)
    except:
        pass
    print('\n')

def scrape_data(url):

    response = requests.get("https://www.cbssports.com/nba/stats/playersort/nba/year-2019-season-preseason-category-scoringpergame", timeout=10)
    soup = BeautifulSoup(response.content, 'html.parser')

    table = soup.find_all('table')[1]

    rows = table.select('tbody > tr')

    header = [th.text.rstrip() for th in rows[1].find_all('th')]

    with open('statsoutput.csv', 'w') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(header)
        for row in rows[1:]:
            data = [th.text.rstrip() for th in row.find_all('td')]
            writer.writerow(data)


if __name__=="__main__":
    url = "https://www.cbssports.com/nba/stats/playersort/nba/year-2019-season-preseason-category-scoringpergame"
    scrape_data(url)

I've been trying to export the stats from this web page to a csv file. When I run my code, the first part works fine and retrieves the data I want, but the function fails to export it to a csv file and I get this error:

    table = soup.find_all('table')[1]
IndexError: list index out of range

And I'm not really sure why.

python html web-scraping export-to-csv
1 Answer
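A minimal sketch of one possible fix, assuming the HTML that requests actually receives contains only a single <table> (so find_all('table')[1] indexes past the end of a one-element list) and that the header cells sit in <thead> rather than in a body row:

import csv
import requests
from bs4 import BeautifulSoup


def scrape_data(url):
    response = requests.get(url, timeout=10)
    soup = BeautifulSoup(response.content, 'html.parser')

    # Count the tables first: if this prints 1, then [1] is out of range
    # and the stats table is the only (first) one.
    tables = soup.find_all('table')
    print(len(tables))
    table = tables[0]

    # Header cells are assumed to live in <thead>, data rows in <tbody>.
    header = [th.text.strip() for th in table.select('thead th')]
    rows = table.select('tbody > tr')

    with open('statsoutput.csv', 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(header)
        for row in rows:
            data = [td.text.strip() for td in row.find_all('td')]
            if data:  # skip rows without data cells (e.g. section headers)
                writer.writerow(data)


if __name__ == "__main__":
    url = "https://www.cbssports.com/nba/stats/playersort/nba/year-2019-season-preseason-category-scoringpergame"
    scrape_data(url)

Passing the url argument through to requests.get, instead of repeating the hard-coded address inside the function, also keeps the __main__ block meaningful. If the printed table count is 0, the table is likely built by JavaScript and won't be reachable with requests alone.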