'''
批量下载豆瓣首页图片
采用伪装浏览器的方式爬取豆瓣网站首页的图片,保存到指定路径下
'''
import urllib.request,socket,re,sys,os
import ssl
# WARNING: globally disables TLS certificate verification for this process.
# This is a common workaround for missing root certs on macOS Python builds,
# but it removes man-in-the-middle protection — do not reuse in real code.
ssl._create_default_https_context = ssl._create_unverified_context
# Destination directory for downloaded images. The doubled slashes are
# harmless to the OS (treated the same as single slashes) but unconventional.
targetPath = "//Users//wangleilei//Documents//03__douban_Images"
def saveFile(path):
    """Map a remote image URL to a local file path under ``targetPath``.

    Ensures ``targetPath`` exists, then joins it with the final path
    component of *path* (everything after the last ``'/'``).

    :param path: image URL; must contain at least one ``'/'``
                 (raises ``ValueError`` otherwise, via ``rindex``).
    :return: local filesystem path to save the image to.
    """
    # makedirs + exist_ok: creates missing parent directories too, and
    # avoids the check-then-create race the original isdir/mkdir pair had.
    os.makedirs(targetPath, exist_ok=True)
    # The URL's last segment becomes the local file name.
    filename = path[path.rindex('/') + 1:]
    return os.path.join(targetPath, filename)
url = "https://www.douban.com/"
# Impersonate a desktop Firefox: douban rejects urllib's default User-Agent.
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:57.0) Gecko/20100101 Firefox/57.0'}
request = urllib.request.Request(url=url, headers=headers)
# Context manager guarantees the HTTP response is closed after reading.
with urllib.request.urlopen(request) as response:
    # Decode to text (ignoring undecodable bytes) instead of str(bytes),
    # which would produce a "b'...'" repr with escaped characters.
    data = response.read().decode('utf-8', errors='ignore')
print(data)
# findall returns (full_url, extension) tuples because the pattern has two
# groups; set() removes duplicate links. The extension group is unused.
for link, _ext in set(re.findall(r'(https:[^\s]*?(png|gif|jpg))', data)):
    print(link)
    try:
        urllib.request.urlretrieve(link, saveFile(link))
        print('成功')
    except (OSError, ValueError):
        # OSError covers URLError/HTTPError/ContentTooShortError and file
        # errors; ValueError covers malformed URLs. The original bare
        # except also swallowed KeyboardInterrupt/SystemExit — too broad.
        print('失败')
# Part of my Python 3 crawler series.
# Based on the "Python 3 crawler series" tutorial.