可以,直接使用,具体案例如下:
# coding: utf-8
# author: http://www.chenhaifei.com/
# Fetch the archive page of chenhaifei.com and print each post's title and date.
#
# Fixes over the original paste:
#  * the `import requests` line had been fused with the author URL (syntax error);
#  * the `url` string literal was split across two lines (syntax error);
#  * the loop body had lost its indentation;
#  * ported from Python 2 to Python 3 (`print()` function); the
#    reload(sys)/sys.setdefaultencoding('utf-8') hack is unnecessary in
#    Python 3, where str is Unicode by default.
import requests  # HTTP client used to download the page
from bs4 import BeautifulSoup as bs  # parses the HTML into a navigable tree

# Present a regular browser User-Agent so the server serves the normal page.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
}

url = 'http://www.chenhaifei.com/archive'
print(url)

# Download the raw page bytes; the 120 s timeout guards against a hung request.
cont = requests.get(url, timeout=120, headers=headers).content
soup = bs(cont, "html.parser")  # structure the source with BeautifulSoup

# Each post is an <li> under <ul class="listing">.
infos = soup.find('ul', {'class': "listing"}).find_all('li')
for i in infos:
    title = i.find('a').text                       # post title from the <a> tag
    date = i.find('span', {'class': "date"}).text  # post date from <span class="date">
    print(title, date)