# -*- coding: utf-8 -*-
import re                             # regular expressions, for text matching
from bs4 import BeautifulSoup         # HTML parsing, to extract the data
import urllib.request, urllib.error   # to build the request and fetch the page
import xlwt                           # Excel output
import sqlite3                        # SQLite database access
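# Note: bs4 and xlwt are third-party packages; if they are missing, something
# like `pip install beautifulsoup4 xlwt` should pull them in.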
def main():
    # 1. scrape the pages
    # 2. parse the data
    # 3. save the data
    # headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36 Edg/92.0.902.78"}
    # req = urllib.request.Request(url=url,headers=headers)
    # response = urllib.request.urlopen(req)
    # The lines above show how the request is wrapped with headers that
    # impersonate a browser; askURL below does this for real.
    baseurl = "https://movie.douban.com/top250?start="
    datalist = getdata(baseurl)
    # savepath = "豆瓣电影top250.xls"
    dbpath = "movie.db"
    saveData2DB(datalist, dbpath)
    # to save to Excel instead:
    # savedata(datalist, savepath)
    # askURL("https://movie.douban.com/top250?start=")
findlink = re.compile(r'<a href="(.*?)">')                                             # movie detail link
findImgSrc = re.compile(r'<img.*src="(.*?)"', re.S)                                    # poster image
findTitle = re.compile(r'<span class="title">(.*)</span>')                             # title
findRating = re.compile(r'<span class="rating_num" property="v:average">(.*)</span>')  # rating
findJudge = re.compile(r'<span>(\d*)人评价</span>')                                     # number of ratings
findInq = re.compile(r'<span class="inq">(.*)</span>')                                 # one-line summary
findBd = re.compile(r'<p class="">(.*?)</p>', re.S)                                    # details paragraph
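# A minimal sketch of how these patterns are used (the sample HTML here is
# made up for illustration, not taken from the real page):
#
#     sample = '<span class="title">肖申克的救赎</span>'
#     re.findall(findTitle, sample)   # -> ['肖申克的救赎']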
# scrape the pages
def getdata(baseurl):
    datalist = []
    for i in range(0, 10):                     # 10 pages, 25 movies per page
        url = baseurl + str(i * 25)
        html = askURL(url)
        # parse each movie block on the page
        soup = BeautifulSoup(html, "html.parser")
        for item in soup.find_all('div', class_="item"):
            # print(item)  # debug: inspect everything inside one movie item
            data = []                          # holds the fields of one movie
            item = str(item)
            link = re.findall(findlink, item)[0]
            # print(link)  # movie detail link
            data.append(link)                  # detail link
            imgSrc = re.findall(findImgSrc, item)[0]
            data.append(imgSrc)                # poster image link
            titles = re.findall(findTitle, item)
            if len(titles) == 2:
                ctitle = titles[0]             # Chinese title
                data.append(ctitle)
                otitle = titles[1].replace("/", "")  # strip the separator
                data.append(otitle)            # foreign title
            else:
                data.append(titles[0])
                data.append(' ')               # no foreign title, leave blank
            rating = re.findall(findRating, item)[0]
            data.append(rating)                # rating
            judgenum = re.findall(findJudge, item)[0]
            data.append(judgenum)              # number of ratings
            inq = re.findall(findInq, item)    # one-line summary
            if len(inq) != 0:
                inq = inq[0].replace("。", "")  # drop the trailing full stop
                data.append(inq)
            else:
                data.append(" ")
            bd = re.findall(findBd, item)[0]
            bd = re.sub(r'<br(\s+)?/>(\s+)?', " ", bd)  # drop <br/> tags
            bd = re.sub('/', " ", bd)
            data.append(bd.strip())            # strip surrounding whitespace
            datalist.append(data)              # collect this movie's fields
    return datalist
def askURL(url):
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36 Edg/92.0.902.78"}
    request = urllib.request.Request(url, headers=head)  # wrap the request with browser-like headers
    html = ""
    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode('utf-8', errors='ignore')
        # print(html)
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html
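# e.g. askURL("https://movie.douban.com/top250?start=0") returns the HTML of
# the first page, or "" if the request failed.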
# save the data to an Excel file
def savedata(datalist, savepath):
    print("save....")
    book = xlwt.Workbook(encoding="utf-8", style_compression=0)      # create the workbook
    sheet = book.add_sheet("豆瓣电影top250", cell_overwrite_ok=True)  # create the worksheet
    col = ("电影链接", "图片链接", "中文名", "外国名", "评分", "评价人数", "概况", "相关信息")
    for i in range(0, 8):
        sheet.write(0, i, col[i])              # header row
    for i in range(0, 250):
        print("record %d" % (i + 1))
        data = datalist[i]
        for j in range(0, 8):
            sheet.write(i + 1, j, data[j])
    book.save(savepath)
def saveData2DB(datalist, dbpath):
    print("loading.................")
    init_db(dbpath)
    conn = sqlite3.connect(dbpath)
    cur = conn.cursor()
    for data in datalist:
        for index in range(len(data)):
            if index == 4 or index == 5:       # score and rated are numeric, no quoting
                continue
            data[index] = '"' + data[index] + '"'
        sql = '''
            insert into movie250_2(
            info_link,pic_link,cname,ename,score,rated,instroduction,info)
            values(%s)''' % ",".join(data)
        print(sql)
        cur.execute(sql)
        conn.commit()
    cur.close()
    conn.close()
    print("database writes finished")
# create the table (assumes movie.db does not already contain movie250_2)
def init_db(dbpath):
    sql = '''
        create table movie250_2
        (
            id integer primary key autoincrement,
            info_link text,
            pic_link text,
            cname varchar,
            ename varchar,
            score numeric,
            rated numeric,
            instroduction text,
            info text
        )
    '''
    conn = sqlite3.connect(dbpath)
    cursor = conn.cursor()
    cursor.execute(sql)
    conn.commit()
    conn.close()
if __name__ == "__main__":
    main()
    print("scraping finished!")
A classic exercise: scraping Douban movie data. Classic as it is, I think there is still some real technique in it, so let me share my technical takeaways: