Scraping the whole of acg178 with Python 2.7 (well, sort of)


Four files in total. I'm a beginner, so please go easy on me; feedback is welcome.

1. Scrape all the columns on the homepage and insert them into MySQL. This script runs standalone; there are only 16 columns in total, so it was just practice, and I never went on to crawl the articles under them from here.
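The inserts in this file assume a cloumn_tab table that the post never shows. Here is a minimal guess at its schema; the column names match the INSERT statements below, but the types, sizes, and the auto-increment id are assumptions:

# -*- coding: UTF-8 -*-
# hypothetical schema for cloumn_tab -- types and sizes are guesses
import pymysql

connect = pymysql.connect(host='localhost', user='root', passwd='44124985',
                          db='acgcloumn', port=3306, charset='utf8')
cursor = connect.cursor()
cursor.execute("""
    create table if not exists cloumn_tab (
        id     int auto_increment primary key,
        name   varchar(64)  not null,   -- column title
        weburl varchar(255) not null,   -- column landing page url
        pid    int default 0            -- parent column id, 0 = top level
    ) default charset=utf8
""")
connect.commit()
connect.close()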

# -*- coding: UTF-8 -*-
import pymysql

from Tools import ToolsMethods

connect = pymysql.connect(
    host='localhost',
    user='root',
    passwd='44124985',
    db='acgcloumn',
    port=3306,
    charset='utf8'
)
# get a cursor
cursor = connect.cursor()

def getCloumnData(tree):
    first_titleArr = tree.xpath('//div[@class="topbar-bg"]/div/div/ul/li/a/text()')
    first_urlList = tree.xpath('//div[@class="topbar-bg"]/div/div/ul/li/a/@href')
    pid = 0
    # insert the top-level columns (pid 0 marks them as roots)
    for i in range(len(first_titleArr)):
        cursor.execute("insert into cloumn_tab (name,weburl,pid) values (%s,%s,%s)",
                       (first_titleArr[i], first_urlList[i], pid))
    connect.commit()

    # insert the sub-columns; their pid (the parent column's id) is not set
    # here and gets filled in by hand afterwards
    sub_titleArr = tree.xpath('//div[@class="topbar-bg"]/div/div/ul/li/ul/li/a/text()')
    sub_urlList = tree.xpath('//div[@class="topbar-bg"]/div/div/ul/li/ul/li/a/@href')
    for j in range(len(sub_titleArr)):
        cursor.execute("insert into cloumn_tab (name,weburl) values (%s,%s)",
                       (sub_titleArr[j], sub_urlList[j]))
    connect.commit()



if __name__ == '__main__':
    toolsObj = ToolsMethods()
    mytree = toolsObj.getHtmlTrees('http://acg.178.com/')
    if mytree is not None:  # getHtmlTrees returns None on a failed request
        getCloumnData(mytree)
    connect.close()

2. This one crawls the listing pages inside a column; run it and the whole column gets scraped clean.

# -*- coding: UTF-8 -*-
import re
import os

from Tools import ToolsMethods
from ContentDemo import ContentFileMethods

AllTitleImageList = []
ALLNextUrlList = []
DownLoadPicSwitch = 1
TitlePagePid = 1  # filled in by hand for now: a title page's pid is its sub-column's id, and there are only 16 of those
# image download section

def getTotalPages(url):
    # work out how many pages the column has
    tool_obj = ToolsMethods()
    tree = tool_obj.getHtmlTrees(url)
    if tree is None:
        return
    maxPageList = tree.xpath('//div[@class="splitpage"]/div/a/@href')
    for e in maxPageList:
        print e
        notLoaded = tool_obj.stringIntoMD5(e)
        if notLoaded:
            getPageTitleUrlAndImage(e, maxPageList)
        else:
            print 'already loaded, skipping'
        
    
def getPageTitleUrlAndImage(url, arr):
    # collect each list page's article links and thumbnail urls
    tool_obj = ToolsMethods()
    tree = tool_obj.getHtmlTrees(url)
    if tree is None:
        return
    nextUrlList = tree.xpath('//div[@class="imgtextlist"]/ul/li/div/a/@href')
    tmpImgList = tree.xpath('//div[@class="imgtextlist"]/ul/li/div/a/img/@src')
    ALLNextUrlList.extend(nextUrlList)
    AllTitleImageList.extend(tmpImgList)
    if url == arr[-1]:  # if this url is the pager's last entry, load it to see whether more pages follow
        getTotalPages(url)

    
def savePicIn():
    if DownLoadPicSwitch == 1:
        print 'starting image download'
        path = 'titleImg'
        save_dir = 'f:\\acgTitleImg\\' + path + '\\'
        if not os.path.exists(save_dir):  # makedirs raises if the directory already exists
            os.makedirs(save_dir)
        toolsObj = ToolsMethods()
        for picUrl in AllTitleImageList:
            print picUrl
            mat = re.search(r'([0-9]+\.jpg|[0-9]+\.png|[0-9]+\.gif)', str(picUrl))  # derive the file name
            if mat:
                picName = mat.group(0)
                toolsObj.downLoadPicture(picUrl, picName, path)

        print 'image download finished'

# article-page handling
def getArticlePageContent(list_url,next_pid):
    content_obj = ContentFileMethods()
    content_obj.receivePageUrl(list_url,next_pid)

if __name__ == '__main__':
    toolsObj = ToolsMethods()
    # the returned ids serve as the title pages' pids; idlist's index maps to
    # the content pages' pids (note that index 0 actually holds pid 1)
    urllist, idlist = toolsObj.getTitleUrl()
    # for each sub-column: find the page count first, then collect every title url
    getTotalPages(urllist[0])
    # for url in urllist:
    #     getTotalPages(url)
    # with the links and thumbnails collected, download the images locally
    # savePicIn()  # skipped for now to avoid getting blocked; title-page images
    #              # aren't stored in the DB and title pages aren't parsed either,
    #              # so this line is rarely needed
    print 'images saved'
    getArticlePageContent(ALLNextUrlList, 1)  # 1 is hardcoded because only urllist[0] was crawled; take it from the loop later (see the sketch below)

    print 'all done'
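For reference, the full loop that the comments above point towards (crawling every sub-column instead of only urllist[0]) might look like this sketch; clearing the shared global lists between columns is my assumption, since the original never resets them:

if __name__ == '__main__':
    toolsObj = ToolsMethods()
    urllist, idlist = toolsObj.getTitleUrl()
    for i in range(len(urllist)):
        # reset the shared globals so columns don't bleed into each other (assumption)
        del ALLNextUrlList[:]
        del AllTitleImageList[:]
        getTotalPages(urllist[i])
        getArticlePageContent(ALLNextUrlList, idlist[i])  # the real sub-column id as pid
    print 'all done'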

3. Following on from step 2, parse each article detail page and insert it into MySQL.

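File 2 hands the collected article URLs to ContentDemo's ContentFileMethods.receivePageUrl(list_url, next_pid). Below is only a minimal sketch of what that class might look like, given just that call site: the XPath expressions, the article_tab table, and its name/content/pid columns are assumptions, not the original code.

# -*- coding: UTF-8 -*-
import pymysql

from Tools import ToolsMethods

connect = pymysql.connect(host='localhost', user='root', passwd='44124985',
                          db='acgcloumn', port=3306, charset='utf8')
cursor = connect.cursor()

class ContentFileMethods(object):

    def receivePageUrl(self, list_url, next_pid):
        # walk every article url collected in step 2
        tool_obj = ToolsMethods()
        for page_url in list_url:
            tree = tool_obj.getHtmlTrees(page_url)
            if tree is None:
                continue
            # hypothetical xpaths for the detail page's title and body
            titles = tree.xpath('//h1/text()')
            paragraphs = tree.xpath('//div[@class="news-main"]//p/text()')
            if not titles:
                continue
            cursor.execute(
                "insert into article_tab (name,content,pid) values (%s,%s,%s)",
                (titles[0], '\n'.join(paragraphs), next_pid))
        connect.commit()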

4. This is the utility class I use. It's pretty rough, so go easy on me; you can skip it. Feedback welcome.

# -*- coding: UTF-8 -*-
import random
import hashlib
import requests
import pymysql
from lxml import html


HaveLoadList = []  # md5 digests of urls that have already been loaded

class ToolsMethods(object):

    def getHeads(self):
        # note: these must be header *values* only -- keeping a 'User-Agent:'
        # prefix inside the strings (as the original list did) would corrupt
        # the header that actually gets sent
        agentList = [
            'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
            'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
            'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0',
            'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko',
            'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:56.0) Gecko/20100101 Firefox/56.0',
            'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Mobile Safari/537.36',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/604.3.5 (KHTML, like Gecko) Version/11.0.1 Safari/604.3.5',
            'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36',
            'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:56.0)',
            'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
            'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)',
            'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)',
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
            'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
            'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11',
            'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
            'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)',
            'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)',
            'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
            'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)',
            'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)',
            'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)',
            'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)',
            'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5',
            'Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5',
            'Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5'
        ]
        myHeaders = {
            'Host': 'acg.178.com',
            'User-Agent': random.choice(agentList),  # simpler than randint plus index math
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Connection': 'keep-alive'
        }
        return myHeaders

    def getTitleUrl(self):
        connect = pymysql.connect(
            host='localhost',
            user='root',
            passwd='44124985',
            db='acgcloumn',
            port=3306,
            charset='utf8'
        )
        # get a cursor
        cursor = connect.cursor()
        # only the sub-column rows; in my database the top-level columns occupy ids 1-8
        sql = 'select weburl,id from cloumn_tab where id > 8'
        cursor.execute(sql)
        result = cursor.fetchall()
        urlList = []
        idlist = []
        for a in result:
            urlList.append(a[0])
            idlist.append(a[1])
        connect.close()
        return (urlList, idlist)


    def getHtmlTrees(self, pageUrl):
        headers = self.getHeads()
        response = requests.get(pageUrl, headers=headers)
        if response.status_code != 200:
            print response.status_code
            return None  # callers should skip pages that failed to load
        return html.fromstring(response.text)

    def stringIntoMD5(self, text):
        # hash the url; return True only the first time we see it (simple dedupe)
        m = hashlib.md5()
        m.update(text)
        md5Str = m.hexdigest()
        if md5Str not in HaveLoadList:
            HaveLoadList.append(md5Str)
            return True
        return False

    def downLoadPicture(self, picUrl, picName, path):
        # urllib.urlretrieve(picUrl, 'f:\\acgTitleImg\\'+path+'\\'+picName)  # one-line download with urllib
        headers = self.getHeads()
        path_pic = 'f:\\acgTitleImg\\' + path + '\\' + picName

        picResponse = requests.get(picUrl, headers=headers)  # send the headers here too
        if picResponse.status_code == 200:
            with open(path_pic, 'wb') as f:  # with-block closes the file handle
                f.write(picResponse.content)
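Finally, a quick standalone sketch of how the helpers above fit together; it uses nothing beyond the methods already defined:

# -*- coding: UTF-8 -*-
from Tools import ToolsMethods

tools = ToolsMethods()
tree = tools.getHtmlTrees('http://acg.178.com/')
if tree is not None:
    # the same top-bar xpath that file 1 uses
    print tree.xpath('//div[@class="topbar-bg"]/div/div/ul/li/a/text()')
# stringIntoMD5 answers True the first time it sees a url, False afterwards
print tools.stringIntoMD5('http://acg.178.com/')   # True
print tools.stringIntoMD5('http://acg.178.com/')   # False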