Scraping a news website with Python


I am trying to scrape some news. I have a larger list of about 3,000 articles from this site, selected by certain criteria, and (bearing in mind that I am new to Python) I came up with this script to scrape them:

import pandas as pd
import bs4

from urllib.request import urlopen
from bs4 import BeautifulSoup

import csv
# get the URL list
list1 = []

a = 'https://www.dnes.bg/sofia/2019/03/13/borisov-se-pohvali-prihodite-ot-gorivata-sa-sys-7-poveche.404467'
b = 'https://www.dnes.bg/obshtestvo/2019/03/13/pazim-ezika-si-pravopis-pod-patronaja-na-radeva.404462'
c = 'https://www.dnes.bg/politika/2019/01/03/politikata-nekanen-gost-na-praznichnata-novogodishna-trapeza.398091'
list1.append(a)
list1.append(b)
list1.append(c)
# define the variables
#url = "https://www.dnes.bg/politika/2019/01/03/politikata-nekanen-gost-na-praznichnata-novogodishna-trapeza.398091"
list2 = list1 #[0:10]
#type(list2)

href = []
title = []
subtitle = []
time = []
article = []
art1 = []

filename = "scraped.csv"
f = open(filename, "w")
#headers = "href;title;subtitle;time;article\n"
headers = "title;subtitle;time;article\n"
f.write(headers)


for url in list2:
    html = urlopen(url)
    soup = BeautifulSoup(html, 'lxml').decode('windows-1251')

    href = url
    title = soup.find("h1", "title").string
    subtitle = soup.find("div", "descr").string
    time = soup.find("div", "art_author").text
    art1.extend(soup.find("div", id="art_start").find_all("p"))

    for a in art1:
        article = ([a.text.strip()])
        break

    title = "".join(title)
    subtitle = "".join(subtitle)
    time = "".join(time)
    article = "".join(article)

    #f.write(href + ";" + title + ";" + subtitle + ";" + time + ";" + article + "\n")
    f.write(title + ";" + subtitle + ";" + time + ";" + article +"\n")
f.close()

The main problem right now is that I am getting an error:

  File "<ipython-input-12-9a796b182a82>", line 24, in <module>
    title = soup.find("h1", "title").string
TypeError: slice indices must be integers or None or have an __index__ method

I really cannot find a solution.
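For reference, the error traces back to soup = BeautifulSoup(html, 'lxml').decode('windows-1251'): the .decode() call turns the soup into a plain Python string, so soup.find is actually str.find, whose optional second argument must be an integer start index. A minimal reproduction:

# After .decode(), soup is a str, so this is str.find(sub, start, end);
# passing "title" as the start index raises the same TypeError.
html = '<h1 class="title">...</h1>'
html.find("h1", "title")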

The second problem is that whenever I do manage to scrape a page, some cells come out empty, which means I have to find a way to get the content that is loaded via Ajax.
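A quick way to check which fields are really Ajax-loaded is to fetch the raw HTML, where no JavaScript has run, and see whether the node exists at all. A minimal sketch, assuming the requests package (not used in the script above):

import requests
from bs4 import BeautifulSoup

# Raw server response; no JavaScript is executed here.
raw = requests.get(list1[2]).text
static_soup = BeautifulSoup(raw, 'html.parser')
# None means the element is injected client-side, so a plain
# HTTP fetch will never contain it.
print(static_soup.find("div", id="art_start"))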

I am using Anaconda version 2018.12.

2 Answers

0 votes

Something I stumbled upon (here: https://www.youtube.com/watch?v=FSH77vnOGqU):

import bs4 as bs
import sys
import urllib.request
from PyQt5.QtWebEngineWidgets import QWebEnginePage
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import QUrl


class Page(QWebEnginePage):
    """Load a URL in a headless Qt browser and capture the rendered HTML."""
    def __init__(self, url):
        self.app = QApplication(sys.argv)
        QWebEnginePage.__init__(self)
        self.html = ''
        self.loadFinished.connect(self._on_load_finished)
        self.load(QUrl(url))
        self.app.exec_()  # blocks until Callable quits the app

    def _on_load_finished(self):
        # toHtml is asynchronous: it returns None and hands the rendered
        # HTML to the callback, so its result should not be assigned.
        self.toHtml(self.Callable)
        print('Load finished')

    def Callable(self, html_str):
        self.html = html_str
        self.app.quit()



def main():
    page = Page('https://pythonprogramming.net/parsememcparseface/')
    soup = bs.BeautifulSoup(page.html, 'html.parser')
    js_test = soup.find('p', class_='jstest')
    print(js_test.text)

if __name__ == '__main__': main()
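As a sketch, the same Page class could be pointed at one of the dnes.bg articles; since the page is rendered with JavaScript before parsing, the Ajax-injected parts should already be present in page.html (untested assumption that the question's selectors match the rendered markup):

page = Page('https://www.dnes.bg/politika/2019/01/03/politikata-nekanen-gost-na-praznichnata-novogodishna-trapeza.398091')
soup = bs.BeautifulSoup(page.html, 'html.parser')
# The question's selector, applied to the fully rendered page.
print(soup.find('h1', 'title').text)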

0 votes

OK. I fixed your problem of the soup object being stored as a string, so that bs4 can actually parse the HTML. I also chose to use pandas' .to_csv(), since I am more familiar with it, but it gets the desired output:

import pandas as pd
from bs4 import BeautifulSoup
import requests


# get the URL list
list1 = []

a = 'https://www.dnes.bg/sofia/2019/03/13/borisov-se-pohvali-prihodite-ot-gorivata-sa-sys-7-poveche.404467'
b = 'https://www.dnes.bg/obshtestvo/2019/03/13/pazim-ezika-si-pravopis-pod-patronaja-na-radeva.404462'
c = 'https://www.dnes.bg/politika/2019/01/03/politikata-nekanen-gost-na-praznichnata-novogodishna-trapeza.398091'
list1.append(a)
list1.append(b) 
list1.append(c) 
# define the variables
list2 = list1 #[0:10]

results = pd.DataFrame()
for url in list2:

    html = requests.get(url)
    soup = BeautifulSoup(html.text, 'html.parser')

    href = url
    title = soup.find("h1", "title").text
    subtitle = soup.find("div", "descr").text
    time = soup.find("div", "art_author").text
    art1 = soup.find("div", id="art_start").find_all("p")

    article = []
    for a in art1:
        # skip the inline ad-loader (googletag) script text inside the article body
        if 'googletag.cmd.push' not in a.text:
            article.append(a.text.strip())
    article = ' '.join(article)



    temp_df = pd.DataFrame([[title, subtitle, time, article]], columns = ['title','subtitle','time','article'])
    # DataFrame.append was removed in pandas 2.0; pd.concat is the current idiom
    results = pd.concat([results, temp_df], ignore_index=True)

results.to_csv("scraped.csv", index=False, encoding='utf-8-sig')
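One note on the encoding choice: utf-8-sig writes a byte-order mark at the start of the file, which is what lets Excel detect the encoding and display the Cyrillic text correctly when the CSV is opened directly.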

Output:

print (results.to_string())
                                               title                                           subtitle                                               time                                            article
0  Борисов се похвали: Приходите от горивата са с...  Мерките за изсветляване на сектора действат, к...  Обновена: 13 мар 2019 13:24 | 13 мар 2019 11:3...  Приходите от горивата са със 7% повече. Това с...
1  "Пазим езика си": Правопис под патронажа на Ра...  Грамотността зависи не само от училището, смят...  Обновена: 13 мар 2019 11:34 | 13 мар 2019 11:2...  За втора поредна година Сдружение "Живата вода...
2  Политиката – "неканен гост" на празничната нов...  Основателни ли бяха критиките на президента Ру...               3 яну 2019 10:45, Цветелин Димитров   Оказа ли се политиката "неканен гост" на празн...