requests, BeautifulSoup

requests

Basic usage of requests.get()

# import
import requests
# GET without parameters
response = requests.get('url')
# GET with parameters

headers = {'referer': 'http://xxxxxx.net/',
           'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0'}
response = requests.get('url', headers=headers)

# or pass query parameters via params
r1 = requests.get(url='http://dict.baidu.com/s', params={'wd': 'python'})
print(r1.url)

>>> http://dict.baidu.com/s?wd=python

# the same works for non-ASCII values (note that requests percent-encodes them,
# so the printed URL will actually show keyword=%E9%A6%99%E6%B8%AF)
payload = {'keyword': '香港', 'salecityid': '2'}
r = requests.get("http://m.ctrip.com/webapp/tourvisa/visa_list", params=payload)
print(r.url)

>>> http://m.ctrip.com/webapp/tourvisa/visa_list?salecityid=2&keyword=香港
# get the response body as decoded text
response.text
# get the raw bytes, e.g. for images and video
response.content
# set the encoding used to decode response.text
response.encoding = 'utf-8'
# get the HTTP status code
response.status_code
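
If you would rather get an exception for 4xx/5xx responses than check the code by hand, requests also provides raise_for_status(); a minimal sketch:

# raises requests.exceptions.HTTPError if the status is 4xx/5xx
response.raise_for_status()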
# set a timeout in seconds
r = requests.get('http://m.ctrip.com', timeout=0.001)
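
A timeout this short will almost certainly fail, which makes it handy for testing error handling; a minimal sketch of catching the resulting exception:

try:
    r = requests.get('http://m.ctrip.com', timeout=0.001)
except requests.exceptions.Timeout:
    # raised when the server does not answer within the timeout
    print('request timed out')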
# the site returns cookies with the first GET/POST response; some POST logins need
# those cookies sent back. this line grabs them as a dict:
response_cookie_dic = response.cookies.get_dict()

Basic usage of requests.post(); use it to submit login credentials or send data to the server

# basic usage of post

# the form data to submit
post_data = {
    'phone': '86' + '01234567890',
    'password': '123',
    'oneMonth': 1  # stay logged in for a month
}
# be sure to send a browser User-Agent, otherwise the request may be blocked by a web firewall
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:58.0) Gecko/20100101 Firefox/58.0'}

response = requests.post(
    url='https://xxxxx.com/login',
    headers=headers,
    data=post_data,
)
# the cookies returned by the login response
response_cookie_dic = response.cookies.get_dict()

# request again, sending back the cookies obtained above
response = requests.post(
    url='https://xxxxx.com/login',
    headers=headers,
    data=post_data,
    cookies=response_cookie_dic
)
# use a proxy to avoid hitting the target from the same IP repeatedly; proxies takes
# a dict mapping scheme to 'ip:port'. you can keep a pool of proxy IPs and discard each one after use.
# pass the dict via the proxies argument:
response = requests.get(target_url, headers=headers, proxies=proxy_dict, timeout=30)
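
For reference, this is the shape proxies expects, a dict mapping scheme to proxy address (the address below is a made-up placeholder):

proxy_dict = {
    'http': 'http://123.45.67.89:8080',   # placeholder proxy address
    'https': 'http://123.45.67.89:8080',
}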

requests.Session keeps cookies across requests automatically, so there is no manual cookie bookkeeping

s = requests.Session()

# GET request; cookies from earlier responses in this session are sent automatically
target_response = s.get(url=target_url, headers=target_headers)
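
To see what the session buys you, compare with the manual cookie passing above; a minimal sketch reusing the login data from earlier (the /profile URL is a hypothetical example):

s = requests.Session()
# cookies set by the login response are stored on the session...
s.post('https://xxxxx.com/login', headers=headers, data=post_data)
# ...and sent automatically with every later request, no cookies= needed
profile = s.get('https://xxxxx.com/profile')  # hypothetical follow-up page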

Configuring the timeout and the number of retries

import requests
from requests.adapters import HTTPAdapter

headers = dict()  # build the headers dict
headers["User-Agent"] = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36"
headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"
headers["Accept-Encoding"] = "gzip, deflate, sdch"
headers["Accept-Language"] = "zh-CN,zh;q=0.8"
request_retry = HTTPAdapter(max_retries=3)  # retry each connection up to 3 times

def my_get(url, refer=None):
    session = requests.session()
    session.headers = headers
    if refer:
        headers["Referer"] = refer
    session.mount('https://', request_retry)  # mount the retry adapter for this URL prefix
    session.mount('http://', request_retry)
    return session.get(url)
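
A usage sketch; note that max_retries only governs connection retries, so if you also want a timeout it still has to be passed per request, e.g. session.get(url, timeout=10):

r = my_get('http://m.ctrip.com', refer='http://m.ctrip.com')
print(r.status_code)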

The simplest implementation: use the random module to draw a random IP from an IP pool and a random user-agent from a list

import random

# user-agent pool
header_list = ["Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Firefox/78.0",
               "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3704.400 QQBrowser/10.4.3587.400"]
# IP pool
ip_pools = ["123.54.44.4:9999",
            "110.243.13.120:9999",
            "183.166.97.101:9999"]

random_ip = random.choice(ip_pools)
header = ("User-Agent", random.choice(header_list))

BeautifulSoup

# import
from bs4 import BeautifulSoup

# parse; 'html.parser' can be swapped for 'lxml'. lxml is also the only parser that supports XML
soup = BeautifulSoup(response.text, 'html.parser')

# find the first tag of a given name: a, div, or any other HTML tag
tag1 = soup.find('a')

# find the first tag whose id is d1
tag2 = soup.find(id='d1')

# find the first div tag whose id is d1
tag3 = soup.find('div', id='d1')

# find_all returns every matching tag as an iterable; walk it with for
# attrs holds the attribute values the tags must match
all_div = soup.find_all('div', attrs={'class': 'card'})
for div in all_div:
    print(div.img['src'])

# select: CSS selectors
soup.select("title")
soup.select("body a")
soup.select("#link1")
soup.select('a[href]')
soup.select("p > a")
li_list = soup.select("div.postlist ul#pins li")
href = BeautifulSoup(response.content, "lxml").select_one("div.main-image img").attrs["src"]
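
To make these calls concrete, a self-contained sketch on a small hand-written HTML string:

from bs4 import BeautifulSoup

html = '<div class="card"><img src="a.jpg"></div><div class="card"><img src="b.jpg"></div>'
soup = BeautifulSoup(html, 'html.parser')
for div in soup.find_all('div', attrs={'class': 'card'}):
    print(div.img['src'])  # prints a.jpg, then b.jpg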

Writing to a file

import os

# os.mkdir creates the directory relative to the current working directory
# (start_num, img_name and src_response come from the surrounding scraping code)
if not os.path.exists("img"):
    os.mkdir("img")
if not os.path.exists("img/" + str(start_num)):
    os.mkdir("img/" + str(start_num))
with open("img/" + str(start_num) + "/" + img_name + ".jpg", 'wb') as fs:
    fs.write(src_response.content)
print("download success!")
