爬取搜狐网有关篮球的网站
2021-03-26 22:25
标签: 正则 / bs4。方式一:正则;方式二:bs4。原文地址: https://www.cnblogs.com/su-sir/p/12636654.html

import requests
import re

import requests

# --- Method 1: extract links with a regular expression -----------------------
# Fetch the Sohu homepage; a timeout prevents the script from hanging forever.
response = requests.get('http://www.sohu.com', timeout=10)
links = re.findall(r'href="http://www.soscw.com/(.*?)"', response.text)

# Static assets and mail links we do not want to crawl.
# Raw string + pre-compiled once (original used a non-raw string with invalid
# escapes and recompiled the pattern on every loop iteration).
_SKIP = re.compile(r'\.jpg|\.pdf|\.css|\.ico|\.tif|\.gif|mailto')

valid_links = []
for raw in links:
    link = raw.strip()
    if 'sohu' not in link:
        # Keep only Sohu-hosted links.
        continue
    if _SKIP.search(link):
        # Skip images, stylesheets, icons, and mailto links.
        continue
    if link.startswith('//'):
        # Protocol-relative URL: prepend the scheme.
        valid_links.append('http:' + link)
    else:
        valid_links.append(link)

# Visit each candidate page and record those mentioning basketball (篮球).
for link in valid_links:
    page = requests.get(link, timeout=10)
    if "篮球" in page.text:
        with open('D:\\搜狐网关于篮球的网站.txt', 'a', encoding='utf-8') as f:
            f.write(link + '\n')
import re

import requests
from bs4 import BeautifulSoup


# --- Method 2: extract links with BeautifulSoup ------------------------------
def has_href(tag):
    """find_all filter: keep only tags that carry an href attribute."""
    return tag.has_attr('href')


# Fetch the Sohu homepage; a timeout prevents the script from hanging forever.
response = requests.get('http://www.sohu.com', timeout=10)
soup = BeautifulSoup(response.text, 'html.parser')
links = [tag.get('href') for tag in soup.find_all(has_href)]

# Static assets and mail links we do not want to crawl.
# Raw string + pre-compiled once (original used a non-raw string with invalid
# escapes and recompiled the pattern on every loop iteration).
_SKIP = re.compile(r'\.jpg|\.pdf|\.css|\.ico|\.tif|\.gif|mailto')

valid_links = []
for raw in links:
    link = raw.strip()
    if 'sohu' not in link:
        # Keep only Sohu-hosted links.
        continue
    if _SKIP.search(link):
        # Skip images, stylesheets, icons, and mailto links.
        continue
    if link.startswith('//'):
        # Protocol-relative URL: prepend the scheme.
        valid_links.append('http:' + link)
    else:
        valid_links.append(link)

# Visit each candidate page and record those mentioning basketball (篮球).
for link in valid_links:
    page = requests.get(link, timeout=10)
    if "篮球" in page.text:
        with open('D:\\搜狐网关于篮球的网站.txt', 'a', encoding='utf-8') as f:
            f.write(link + '\n')