Python Crawler Explained (Part 4): Use a Crawler to Grab the Images You Want, a Simple Image Downloader [Baidu Engine]


This crawler uses the Baidu image search engine; if you later want to crawl a different site, you will need to adapt it yourself.
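Concretely, only two pieces are Baidu-specific: the paginated flip search URL (the pn parameter steps through results 60 at a time) and the regular expression that pulls each original image address out of the "objURL" fields embedded in the page source. Here is a minimal sketch of just that extraction step, with "cat" as a stand-in keyword, assuming the flip endpoint still embeds objURL fields:

import re
import requests

# Fetch one page of Baidu image search results; pn=0 is the first page
# and subsequent pages come in steps of 60.
session = requests.Session()
session.headers['User-Agent'] = 'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'
page = session.get(
    'https://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=cat&pn=0',
    timeout=7, allow_redirects=False)

# Each original image address appears in the raw HTML as "objURL":"...",
urls = re.findall('"objURL":"(.*?)",', page.text, re.S)
print(len(urls), 'image URLs on this page')

To target a different site, you would swap out both the URL template and this regex.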

Drop a like and a follow!!

Without further ado, here is the code:

# -*- coding: utf-8 -*-
import os
import re

import requests
from bs4 import BeautifulSoup

num = 0            # images downloaded so far
numPicture = 0     # how many images the user asked for
file = ''          # target folder name
List = []          # per-page lists of image URLs collected by Find()
def Find(url, A):
    """Page through the search results and count how many images are available."""
    global List
    print('Checking how many images are available, please wait...')
    t = 0
    s = 0
    while t < 1000:
        Url = url + str(t)
        try:
            # the flip endpoint serves 60 results per page, indexed by pn
            Result = A.get(Url, timeout=7, allow_redirects=False)
        except requests.exceptions.RequestException:
            t = t + 60
            continue
        else:
            result = Result.text
            # every original image address appears as an "objURL" field
            pic_url = re.findall('"objURL":"(.*?)",', result, re.S)
            s += len(pic_url)
            if len(pic_url) == 0:
                break
            else:
                List.append(pic_url)
                t = t + 60
    return s


def recommend(url):
    """Scrape the related-search suggestions (the topRS block) for the keyword."""
    F = []
    try:
        html = requests.get(url, allow_redirects=False)
    except requests.exceptions.RequestException:
        return F   # return an empty list so the caller can still iterate
    else:
        html.encoding = 'utf-8'
        bsObj = BeautifulSoup(html.text, 'html.parser')
        div = bsObj.find('div', id='topRS')
        if div is not None:
            listA = div.findAll('a')
            for i in listA:
                if i is not None:
                    F.append(i.get_text())
        return F


def downloadPicture(html, keyword):
    """Extract every objURL from one result page and download the images."""
    global num
    pic_url = re.findall('"objURL":"(.*?)",', html, re.S)
    print('Found images for keyword ' + keyword + ', starting download...')
    for each in pic_url:
        print('Downloading image ' + str(num + 1) + ', URL: ' + str(each))
        try:
            if each is not None:
                pic = requests.get(each, timeout=7)
            else:
                continue
        except requests.exceptions.RequestException:
            print('Image download failed!!!')
            continue
        else:
            # build the path portably instead of hard-coding backslashes
            string = os.path.join(file, keyword + '_' + str(num) + '.jpg')
            with open(string, 'wb') as fp:
                fp.write(pic.content)
            num += 1
        if num >= numPicture:
            return


if __name__ == '__main__':
    headers = {
        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0',
        'Upgrade-Insecure-Requests': '1'
    }
    A = requests.Session()
    A.headers = headers
    word = input("Follow the blogger so you don't get lost!!!\n\nhttps://jiangongfang.blog.csdn.net/\nhttps://blog.51cto.com/u_15449377\n\n-------------------------Image crawler tool------------------------- \nPlease enter a keyword to search images for: ")
    url = 'https://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=' + word + '&pn='

    # count the available images, then collect the related-search suggestions
    tot = Find(url, A)
    Recommend = recommend(url)  # related-search suggestions
    print('Found a total of [%d] downloadable images for [%s]' % (tot, word))
    numPicture = int(input('How many images do you want to download? '))
    file = input('Name of the folder to create: ')

    if os.path.exists(file):   # check whether the folder already exists
        print('That folder already exists, please enter a different name:')
        file = input('Name of the folder to create: ')
    os.mkdir(file)
    t = 0
    tmp = url
    while t < numPicture:
        try:
            url = tmp + str(t)
            result = A.get(url, timeout=10, allow_redirects=False)
        except requests.exceptions.RequestException:
            print('Network problem, please disable any proxy or fix the connection and retry!!!')
            t = t + 60
        else:
            downloadPicture(result.text, word)
            t = t + 60

    print('Finished downloading the images. Follow the blog for more!!')
    for rec in Recommend:   # print the related-search suggestions
        print(rec, end='  ')
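
One thing the script above does not check (the helper below is my own sketch, not part of the original): many objURL links are stale and answer with an HTML error page rather than image bytes, yet everything gets written out as a .jpg. Testing the Content-Type header before saving keeps those junk files out of the folder; save_image is a hypothetical name:

import os

def save_image(resp, folder, name):
    # Hypothetical helper: only write the body if the server actually
    # sent an image, filtering out dead objURL links that return HTML.
    if not resp.headers.get('Content-Type', '').startswith('image/'):
        return False
    path = os.path.join(folder, name + '.jpg')
    with open(path, 'wb') as fp:
        fp.write(resp.content)
    return True

Replacing the bare open/write inside downloadPicture with a call to a check like this would skip broken downloads instead of saving them.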

Let's crawl some pictures of girls and see the result:

The images have now been saved into the image folder.