1 bs4: searching the document tree
from bs4 import BeautifulSoup
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p id="my p" class="title">asdfasdf<b id="bbb" class="boldest">The Dormouse's story</b>
</p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
soup = BeautifulSoup(html_doc, 'lxml')
'''
Summary:
1 find and find_all (find returns the first match, find_all returns all of them)
2 five kinds of search filters: a string, a regex, a list, True, and a function
3 combine searching with document-tree traversal to speed up queries
'''
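A minimal sketch of those five filter kinds, run against the soup built above (find takes the same arguments but returns only the first hit):

import re
# 1) string: match the tag name exactly
print(soup.find_all(name='a'))
# 2) regex: tag names starting with b (matches body and b)
print(soup.find_all(name=re.compile('^b')))
# 3) list: any tag named a or b
print(soup.find_all(name=['a', 'b']))
# 4) True: any tag that has an id attribute at all
print(soup.find_all(id=True))
# 5) function: receives each tag, keeps the ones where it returns True
print(soup.find_all(lambda tag: tag.has_attr('class') and tag.has_attr('id')))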
2 CSS selectors
from bs4 import BeautifulSoup
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p id="my p" class="title">asdfasdf<b id="bbb" class="boldest">The Dormouse's story</b>
</p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
soup = BeautifulSoup(html_doc, 'lxml')
# res=soup.select('a')
# res=soup.select('#link1')
# res=soup.select('.sister')
# res=soup.select('body>p>a')
# once you know CSS selectors you are covered: almost every parser (bs4, lxml, ...) supports both CSS and XPath
# res=soup.select('body>p>a:nth-child(2)')
# res=soup.select('body>p>a:nth-last-child(1)')
# [attribute=value]
res=soup.select('a[href="http://example.com/tillie"]')
print(res)
'''
Things to remember:
1 tag name
2 .class-name
3 #id
4 body a   -> an <a> anywhere among the descendants of body
5 body>a   -> an <a> that is a direct child of body, no deeper descendants
6 for everything else, consult a CSS selector reference
'''
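Since the note above says these parsers also speak XPath, here is a minimal sketch of the same [attribute=value] query done with lxml's XPath (assuming lxml is installed, which it is here since it is the parser used above):

from lxml import etree
tree = etree.HTML(html_doc)
# same query as the CSS a[href="..."] above, in XPath form
res = tree.xpath('//a[@href="http://example.com/tillie"]')
print(res)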
3 Selenium: basic usage
-the data requests brings back can differ from what you see directly in the browser
-requests cannot execute JS
-with requests you have to work out how many requests the page actually fires and send every one of them yourself to piece together the complete page data
Selenium started out as an automated-testing tool; scrapers use it mainly to get around the fact that requests cannot execute JavaScript code.
In essence, Selenium drives a real browser and fully simulates browser operations (navigating, typing, clicking, scrolling, and so on) to obtain the page as it looks after rendering; many browsers are supported.
-install the module: pip3 install selenium
-download the browser driver: Selenium drives a browser, so you need the browser itself (Chrome here) plus the matching chromedriver
-https://registry.npmmirror.com/binary.html?path=chromedriver/
-the driver must match the browser version
 e.g. Chrome 106.0.5249.119 -> find the corresponding driver
-test it with code
from selenium import webdriver
import time

bro = webdriver.Chrome(executable_path='./chromedriver.exe')   # Selenium 3 style; see the Selenium 4 sketch below
bro.get('http://www.baidu.com')
time.sleep(3)
bro.close()   # close the current tab
bro.quit()    # shut the browser down completely
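Note that executable_path is Selenium 3 style; Selenium 4 deprecated it and later removed it. A minimal sketch of the Selenium 4 equivalent, same driver path assumed:

from selenium import webdriver
from selenium.webdriver.chrome.service import Service

bro = webdriver.Chrome(service=Service('./chromedriver.exe'))   # Selenium 4: driver path goes in a Service
bro.get('http://www.baidu.com')
bro.quit()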
4 Headless browser
from selenium import webdriver
import time
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
chrome_options.add_argument('--headless')   # run Chrome without opening a window
bro = webdriver.Chrome(executable_path='./chromedriver.exe', options=chrome_options)
bro.get('https://www.jd.com/')
print(bro.page_source)
time.sleep(3)
bro.close()
bro.quit()
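Headless Chrome starts with a small viewport and may render pages differently from a visible browser. Two commonly used extra flags (an assumption on my part, not from the original notes) bring it closer to a normal session:

chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--window-size=1920,1080')   # headless otherwise defaults to a small window
chrome_options.add_argument('--disable-gpu')             # historically recommended for headless on Windows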
5 Selenium: other usage
5.0 Mini case: auto-login to Baidu
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('http://www.baidu.com')
bro.implicitly_wait(10)    # implicit wait: find_element retries for up to 10 s
bro.maximize_window()
a = bro.find_element(by=By.LINK_TEXT, value='登录')   # '登录' is the visible "log in" link text
a.click()
input_name = bro.find_element(by=By.ID, value='TANGRAM__PSP_11__userName')
input_name.send_keys('33333@qq.com')
time.sleep(1)
input_password = bro.find_element(by=By.ID, value='TANGRAM__PSP_11__password')
input_password.send_keys('lqz12345')
time.sleep(1)
input_submit = bro.find_element(by=By.ID, value='TANGRAM__PSP_11__submit')
input_submit.click()
time.sleep(5)
bro.close()
5.1 Getting an element's location, size, attributes, and text
bro.find_element(by=By.ID, value='the id value')
bro.find_element(by=By.LINK_TEXT, value='exact text of an <a> tag')
bro.find_element(by=By.PARTIAL_LINK_TEXT, value='partial match on <a> tag text')
bro.find_element(by=By.CLASS_NAME, value='class name')
bro.find_element(by=By.TAG_NAME, value='tag name')
bro.find_element(by=By.NAME, value='the name attribute')
bro.find_element(by=By.CSS_SELECTOR, value='a CSS selector')
bro.find_element(by=By.XPATH, value='an XPath expression')
print(code.location)   # on-page position, e.g. {'x': ..., 'y': ...}
print(code.size)       # element size, e.g. {'width': ..., 'height': ...}
-------
print(code.tag_name)   # the tag name
print(code.id)         # Selenium's internal element id (not the HTML id attribute)
print(code.text)       # the element's text content
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
import base64
bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('https://kyfw.12306.cn/otn/resources/login.html')
a = bro.find_element(by=By.LINK_TEXT, value='扫码登录')
a.click()
code = bro.find_element(by=By.CSS_SELECTOR, value='#J-qrImg')
print(code.id)
print(code.location)
print(code.tag_name)
print(code.size)
s = code.get_attribute('src')   # the QR code is embedded as a base64 data URI
print(s)
with open('code.png', 'wb') as f:
    res = base64.b64decode(s.split(',')[-1])   # drop the data-URI prefix before the comma, decode the rest
    f.write(res)
time.sleep(3)
bro.close()
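An alternative that skips the base64 decoding entirely: a WebElement can screenshot itself (assuming a driver recent enough to support element-level screenshots). A minimal sketch:

# save the QR element straight to a file instead of decoding its src
code.screenshot('code.png')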
5.2 Waiting for elements to load
-explicit wait: rarely used here; you must spell out which element to wait for, which gets tedious when there are many (see the sketch below)
-implicit wait:
bro.implicitly_wait(10)
whenever find_element cannot find an element, it keeps retrying for up to 10 s
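For completeness, a minimal sketch of an explicit wait, blocking on one specific element (the id 'kw' is an assumed locator for Baidu's search box):

from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

# block for up to 10 s until this one element is present, then return it
wait = WebDriverWait(bro, 10)
tag = wait.until(EC.presence_of_element_located((By.ID, 'kw')))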
5.3 Element operations
tag.click()                  # click the element
tag.send_keys('some text')   # type into an input
tag.clear()                  # clear an input
from selenium.webdriver.common.keys import Keys
input_search.send_keys(Keys.ENTER)   # press Enter inside a located input; full sketch below
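Putting these together, a minimal sketch that runs a Baidu search (the id 'kw' is Baidu's search box; treat the locator as an assumption that may change):

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import time

bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('https://www.baidu.com/')
bro.implicitly_wait(10)
input_search = bro.find_element(by=By.ID, value='kw')   # Baidu's search box (assumed id)
input_search.send_keys('selenium')
input_search.send_keys(Keys.ENTER)   # submit by pressing Enter instead of clicking the button
time.sleep(3)
bro.close()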
5.4 Executing JS code
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('https://www.jd.com/')
bro.execute_script('scrollTo(0,document.body.scrollHeight)')   # scroll to the bottom of the page
time.sleep(3)
bro.close()
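execute_script can also pass values both ways between Python and the page; a minimal sketch:

# 'return' inside the JS snippet becomes the Python return value
title = bro.execute_script('return document.title')
print(title)
# arguments[...] passes Python values into the snippet
bro.execute_script('console.log(arguments[0], arguments[1])', 'hello', 42)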
5.5 Switching tabs
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('https://www.jd.com/')
bro.execute_script('window.open()')           # open a new, blank tab
bro.switch_to.window(bro.window_handles[1])   # focus the new tab
bro.get('http://www.taobao.com')
time.sleep(2)
bro.switch_to.window(bro.window_handles[0])   # switch back to the first tab
time.sleep(3)
bro.close()
bro.quit()
5.6 Browser back and forward
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('https://www.jd.com/')
time.sleep(2)
bro.get('https://www.taobao.com/')
time.sleep(2)
bro.get('https://www.baidu.com/')
bro.back()      # back to taobao
time.sleep(1)
bro.forward()   # forward to baidu again
time.sleep(3)
bro.close()
5.7 Exception handling
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException, NoSuchElementException, NoSuchFrameException

bro = webdriver.Chrome(executable_path='./chromedriver.exe')
try:
    bro.get('https://www.baidu.com/')
    bro.find_element(by=By.ID, value='not-a-real-id')   # placeholder action; raises NoSuchElementException
except Exception as e:
    print(e)
finally:
    bro.close()   # always runs, so the browser is released even on errors
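The import names three concrete exception classes; a minimal sketch of catching one specifically (the frame name is made up for illustration):

try:
    bro.switch_to.frame('no-such-frame')   # hypothetical frame name
except NoSuchFrameException:
    print('that frame does not exist')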
6 Selenium: log in to cnblogs and reuse the cookies
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import json
bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('https://www.cnblogs.com/')
bro.implicitly_wait(10)
time.sleep(3)
with open('cnblogs.json', 'r', encoding='utf-8') as f:
    cookie = json.load(f)
for item in cookie:
    bro.add_cookie(item)   # inject each saved cookie into the current session
bro.refresh()              # reload; cnblogs should now treat us as logged in
time.sleep(10)
bro.close()
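These notes only show the loading half; cnblogs.json must have been produced earlier by logging in and dumping the cookies, roughly like this (a sketch mirroring what section 7 does for chouti):

# run after logging in (manually or via automation) in the driven browser
cookie = bro.get_cookies()   # list of cookie dicts
with open('cnblogs.json', 'w', encoding='utf-8') as f:
    json.dump(cookie, f)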
7 Chouti: semi-automatic upvoting
-logging in with requests alone is very hard because of the captcha, so: log in with Selenium (solving the captcha by hand), save the cookies, then fire the votes with requests
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
import json
import requests
bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('https://dig.chouti.com/')
bro.implicitly_wait(10)
l = []   # collect article ids here (defined outside try so the final print cannot blow up)
try:
    submit = bro.find_element(by=By.ID, value='login_btn')
    bro.execute_script("arguments[0].click()", submit)   # click via JS in case the button is covered
    time.sleep(2)
    username = bro.find_element(by=By.NAME, value='phone')
    username.send_keys('18953675221')
    password = bro.find_element(by=By.NAME, value='password')
    password.send_keys('lqz123')
    time.sleep(3)
    submit_button = bro.find_element(By.CSS_SELECTOR,
        'body > div.login-dialog.dialog.animated2.scaleIn > div > div.login-footer > div:nth-child(4) > button')
    submit_button.click()
    input()   # the "semi" part: solve the captcha by hand, then press Enter here
    cookie = bro.get_cookies()
    print(cookie)
    with open('chouti.json', 'w', encoding='utf-8') as f:
        json.dump(cookie, f)   # save the cookies so requests can reuse them
    div_list = bro.find_elements(By.CLASS_NAME, 'link-item')   # one div per article
    for div in div_list:
        article_id = div.get_attribute('data-id')   # the id the vote API expects
        l.append(article_id)
except Exception as e:
    print(e)
finally:
    bro.close()
print(l)
with open('chouti.json', 'r', encoding='utf-8') as f:
    cookie = json.load(f)
# requests wants cookies as a plain {name: value} dict
request_cookies = {}
for item in cookie:
    request_cookies[item['name']] = item['value']
print(request_cookies)
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36'
}
for i in l:
    data = {
        'linkId': i
    }
    res = requests.post('https://dig.chouti.com/link/vote', data=data, headers=header, cookies=request_cookies)
    print(res.text)