from bs4 import BeautifulSoup
import re
import urllib2
import urllib
# Read one URL per line from weblist.txt and print the visible text of each page.
list_open = open("weblist.txt")
read_list = list_open.read()
list_open.close()  # release the file handle once the contents are in memory
line_in_list = read_list.split("\n")
for url in line_in_list:
    # Fetch the raw HTML for this URL.
    page = urllib2.urlopen(url).read()
    # Parse it with BeautifulSoup (imported directly above, so no bs4. prefix).
    soup = BeautifulSoup(page)
    # Print the page text exactly once (the old inner loop repeated it per node).
    print(soup.getText())
以下代码用于从 weblist.txt 中列出的多个网站提取文本:
但是,当列表中包含无法打开或无法解析的链接时,脚本会在该链接处报错并立即停止,不再检查后面的链接。例如,假设列表中有 10 个链接,若第 2 个打不开或无法解析,程序就会在此报错终止,后面的链接都不会被处理。我希望脚本能从头到尾检查 weblist 中的每个链接,并从所有能正常打开和解析的链接中提取文本。
只需添加一个try except语句,如下所示:
# Same loop, but a failure on one URL no longer aborts the whole run:
# the exception is reported and the loop moves on to the next link.
for url in line_in_list:
    try:
        # Fetch and parse this URL; any network or parse error lands in except.
        page = urllib2.urlopen(url).read()
        soup = BeautifulSoup(page)
        print(soup.getText())
    except Exception as e:
        # Report the bad link and continue with the remaining URLs.
        print(e)