Python HTML parser pagination

Question

I am new to Python and have gotten fairly far experimenting with an HTML parser, but I am stuck on how to work through the paginated reviews at the bottom of the page on this site.

The URL is in the PasteBin code; for privacy reasons I have left it out of this thread.

Any help is much appreciated.

# Reviews Scrape

import csv
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup

my_url = 'EXAMPLE.COM'

# open the connection and grab the page
uClient = uReq(my_url)
page_html = uClient.read()
uClient.close()

# HTML parsing
page_soup = soup(page_html, "html.parser")

# grab each review block
reviews = page_soup.find_all("div", {"class": "jdgm-rev jdgm-divider-top"})

filename = "compreviews.csv"

# csv.writer quotes fields as needed, so commas inside a review title or
# body no longer break the columns (plain f.write with "," separators did)
with open(filename, "w", newline="", encoding="utf-8") as f:
    writer = csv.writer(f)
    writer.writerow(["Score", "Title", "Content"])

    for container in reviews:
        # the numeric score lives in the data-score attribute, not the text
        score = container.find("span", {"data-score": True})
        user_score = score["data-score"]

        title_review = container.find("b", {"class": "jdgm-rev__title"})
        user_title = title_review.text.strip()

        content_review = container.find("div", {"class": "jdgm-rev__body"})
        user_content = content_review.text.strip()

        print("user_score: " + user_score)
        print("user_title: " + user_title)
        print("user_content: " + user_content)

        writer.writerow([user_score, user_title, user_content])
Tags: python, python-3.x, web-scraping, beautifulsoup, html-parsing
1 Answer

The page fetches its reviews with an XHR GET request that carries a query string. That query string has parameters for reviews per page and for the page number. You can make an initial request with the maximum of 31 reviews per page, extract the html from the returned json, and read off the total page count; then write a loop over the remaining pages to collect all the results. Example construct below:

import requests
from bs4 import BeautifulSoup as bs

start_url = 'https://urlpart&page=1&per_page=31&product_id=someid'

with requests.Session() as s:
    # the endpoint returns json; the rendered reviews sit under the 'html' key
    r = s.get(start_url).json()
    soup = bs(r['html'], 'lxml')
    print([i.text for i in soup.select('.jdgm-rev__author')])
    print([i.text for i in soup.select('.jdgm-rev__title')])
    # the pagination widget exposes the last page number as a data attribute
    total_pages = int(soup.select_one('.jdgm-paginate__last-page')['data-page'])

    for page in range(2, total_pages + 1):
        r = s.get(f'https://urlpart&page={page}&per_page=31&product_id=someid').json()
        soup = bs(r['html'], 'lxml')
        print([i.text for i in soup.select('.jdgm-rev__author')])
        print([i.text for i in soup.select('.jdgm-rev__title')])  # etc.
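One caveat: if all the reviews fit on a single page, the pagination widget may not be rendered at all, in which case select_one returns None and the ['data-page'] lookup raises a TypeError. A minimal sketch of a safer page-count lookup, assuming the same .jdgm-paginate__last-page markup, with a short delay between requests out of politeness:

import time

import requests
from bs4 import BeautifulSoup as bs

start_url = 'https://urlpart&page=1&per_page=31&product_id=someid'

with requests.Session() as s:
    r = s.get(start_url).json()
    soup = bs(r['html'], 'lxml')

    # fall back to a single page if the pagination widget is absent
    last_page = soup.select_one('.jdgm-paginate__last-page')
    total_pages = int(last_page['data-page']) if last_page else 1

    for page in range(2, total_pages + 1):
        time.sleep(1)  # brief pause so we don't hammer the endpoint
        r = s.get(f'https://urlpart&page={page}&per_page=31&product_id=someid').json()
        soup = bs(r['html'], 'lxml')
        print([i.text for i in soup.select('.jdgm-rev__title')])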

Example dataframe to csv:

import requests
from bs4 import BeautifulSoup as bs
import pandas as pd

start_url = 'https://urlpart&page=1&per_page=31&product_id=someid'

authors = []
titles = []

with requests.Session() as s:
    r = s.get(start_url).json()
    soup = bs(r['html'], 'lxml')
    authors.extend([i.text for i in soup.select('.jdgm-rev__author')])
    titles.extend([i.text for i in soup.select('.jdgm-rev__title')])
    # total page count comes from the pagination widget on the first page
    total_pages = int(soup.select_one('.jdgm-paginate__last-page')['data-page'])

    for page in range(2, total_pages + 1):
        r = s.get(f'https://urlpart&page={page}&per_page=31&product_id=someid').json()
        soup = bs(r['html'], 'lxml')
        authors.extend([i.text for i in soup.select('.jdgm-rev__author')])
        titles.extend([i.text for i in soup.select('.jdgm-rev__title')])  # etc.

headers = ['Author', 'Title']
df = pd.DataFrame(zip(authors, titles), columns=headers)
df.to_csv(r'C:\Users\User\Desktop\data.csv', sep=',', encoding='utf-8', index=False)
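To come back to the Score, Title, Content columns from the question, the same loop can read the score out of the data-score attribute and the body text out of .jdgm-rev__body. A sketch, under the assumption that the per-review markup from the question (span[data-score], .jdgm-rev__title and .jdgm-rev__body inside each .jdgm-rev block) also appears in the html the endpoint returns:

import requests
from bs4 import BeautifulSoup as bs
import pandas as pd

start_url = 'https://urlpart&page=1&per_page=31&product_id=someid'
rows = []

with requests.Session() as s:
    r = s.get(start_url).json()
    soup = bs(r['html'], 'lxml')
    last_page = soup.select_one('.jdgm-paginate__last-page')
    total_pages = int(last_page['data-page']) if last_page else 1

    for page in range(1, total_pages + 1):
        if page > 1:  # page 1 is already parsed above
            r = s.get(f'https://urlpart&page={page}&per_page=31&product_id=someid').json()
            soup = bs(r['html'], 'lxml')
        for rev in soup.select('.jdgm-rev'):
            score = rev.select_one('span[data-score]')
            title = rev.select_one('.jdgm-rev__title')
            body = rev.select_one('.jdgm-rev__body')
            rows.append([
                score['data-score'] if score else '',
                title.text.strip() if title else '',
                body.text.strip() if body else '',
            ])

df = pd.DataFrame(rows, columns=['Score', 'Title', 'Content'])
df.to_csv('compreviews.csv', index=False)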