Python crawler - Day1(AM)
  GspnLdKbz2va 2023年11月02日 84 0

1-request.py

import requests

# Minimal GET request: fetch the Baidu homepage and look at the
# different ways the response body can be accessed.
TARGET_URL = "https://www.baidu.com"

resp = requests.get(TARGET_URL)

print("---状态码如下---")
print(resp.status_code)

print("---bytes类型数据:---")
print(resp.content)           # raw body as bytes

print("---str类型数据---")
print(resp.text)              # body decoded with requests' guessed encoding

print("---str类型数据(utf-8)---")
print(resp.content.decode("utf-8"))   # body decoded explicitly as UTF-8

2-download_pic.py

import requests, time

# Download the Baidu logo, timing how long the request takes,
# then save the image bytes to disk.
IMAGE_URL = "https://www.baidu.com/img/bd_logo1.png"

t_begin = time.time()
resp = requests.get(IMAGE_URL)
t_end = time.time()

print("耗时", t_end - t_begin)

# Binary mode ('wb') because an image is raw bytes, not text.
with open('baidu.png', 'wb') as out_file:
    out_file.write(resp.content)

3-download_by_chunk.py

import requests

# Stream a large video download in chunks, printing progress as it goes.
# stream=True keeps the body off memory until iter_content pulls it.
url = "http://clips.vorwaerts-gmbh.de/big_buck_bunny.mp4"

res = requests.get(url=url, stream=True)
res.raise_for_status()  # fail fast on HTTP errors instead of saving an error page

# FIX: Content-Length may be absent (e.g. chunked transfer encoding);
# the original int(res.headers.get(...)) raised TypeError on None.
file_length = int(res.headers.get("Content-Length", 0))

with open("demo.mp4", 'wb') as myfile:
    current_file_write_length = 0

    # FIX: 64 KiB chunks instead of 100 bytes — the original made millions
    # of tiny write/print calls for a multi-MB file.
    for chunk in res.iter_content(chunk_size=64 * 1024):
        # myfile.write(chunk) returns the number of bytes written
        current_file_write_length += myfile.write(chunk)
        if file_length:   # guard against division by zero when length is unknown
            # bytes written / total bytes * 100
            print("下载进度为:%.3f%% " % (100 * current_file_write_length / file_length))
        else:
            print("已下载 %d 字节" % current_file_write_length)

4-res_request.py

import requests

# Compare the headers the server sends back with the headers we sent,
# after attaching a custom User-Agent and Cookie.
url = "http://clips.vorwaerts-gmbh.de/big_buck_bunny.mp4"

headers = {
    "User-Agent" : "Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1",
    "Cookie" :   '''cisession=19dfd70a27ec0eecf1fe3fc2e48b7f91c7c83c60;CNZZDATA1000201968=1815846425-1478580135-https%253A%252F%252Fwww.baidu.com%252F%7C1483922031;Hm_lvt_f805f7762a9a237a0deac37015e9f6d9=1482722012,1483926313;Hm_lpvt_f805f7762a9a237a0deac37015e9f6d9=1483926368'''
}

# stream=True defers downloading the (large) body; only headers are needed here.
res = requests.get(url=url, stream=True, headers=headers)

print(res.headers)          # response headers (from the server)
print("\n")
print(res.request.headers)  # request headers (what we actually sent)

5-params.py

import requests

# Build a query string with the params argument instead of hand-writing
# the URL. Equivalent to requesting https://www.baidu.com/s?wd=漂亮 directly,
# but requests handles the joining and URL-encoding for us.

url = "https://www.baidu.com"
# Appended as ?wd=漂亮&python=100 — a dict is the recommended form.
query_str = { "wd" : "漂亮" , "python" : 100 }


headers = {
    "User-Agent" : "Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1",
    "Cookie" :   '''cisession=19dfd70a27ec0eecf1fe3fc2e48b7f91c7c83c60;CNZZDATA1000201968=1815846425-1478580135-https%253A%252F%252Fwww.baidu.com%252F%7C1483922031;Hm_lvt_f805f7762a9a237a0deac37015e9f6d9=1482722012,1483926313;Hm_lpvt_f805f7762a9a237a0deac37015e9f6d9=1483926368'''
}

# params=... appends the dict above to the URL as a query string.
res = requests.get(url=url, headers=headers, params=query_str)
print(res.content)

6-post.py

import requests, json

# POST a form body to Baidu's translate-suggestion endpoint and print
# the first suggestion pair from the JSON response.
url = "https://fanyi.baidu.com/sug"

# Form payload: the word to look up.
payload = { "kw" : "good" }


headers = {
    "User-Agent" : "Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1",
    "Cookie" :   '''cisession=19dfd70a27ec0eecf1fe3fc2e48b7f91c7c83c60;CNZZDATA1000201968=1815846425-1478580135-https%253A%252F%252Fwww.baidu.com%252F%7C1483922031;Hm_lvt_f805f7762a9a237a0deac37015e9f6d9=1482722012,1483926313;Hm_lpvt_f805f7762a9a237a0deac37015e9f6d9=1483926368'''
}

# data=... sends the payload as form-encoded POST data.
res = requests.post(url=url, headers=headers, data=payload)

# Parse the JSON string body into a dict.
text_dict = json.loads(res.text)

first_entry = text_dict['data'][0]
print(first_entry['k'])     # the key (query word)
print(first_entry['v'])     # the value (suggested translations)

7-demo1.py

import requests
import json

# Baidu sentence-translation endpoint.
url = "https://fanyi.baidu.com/basetrans"
# POST parameters (replace token/sign with the values your own browser sends).
data = {
    "query": "happy every day",
    "from": "en",
    "to": "zh",
    "token": "0fa5f596f21877f6b1191dac4ba48d77",
    "sign": "342945.120976"
}
# Pretend to be a mobile browser.
headers = {
    "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1",
}

# Raw cookie string (replace with the cookies from your own browser).
cookies_str = "REALTIME_TRANS_SWITCH=1; FANYI_WORD_SWITCH=1; HISTORY_SWITCH=1; SOUND_SPD_SWITCH=1; SOUND_PREFER_SWITCH=1; Hm_lvt_64ecd82404c51e03dc91cb9e8c025574=1603939083,1604160877,1604284147,1604302762; BAIDUID=03FCB421FD4EC30952E327D9C34CF51B:FG=1; Hm_lvt_afd111fa62852d1f37001d1f980b6800=1604307516; OUTFOX_SEARCH_USER_ID_NCOO=50333195.918042414; Hm_lpvt_afd111fa62852d1f37001d1f980b6800=1604310327; Hm_lpvt_64ecd82404c51e03dc91cb9e8c025574=1604310327; yjs_js_security_passport=9b35601fbc205c08dbac5495e045e254019a3321_1604310333_js; ___rl__test__cookies=1604310333308"

# Turn "name=value; name=value; ..." into a dict, splitting each pair
# on its FIRST '=' (cookie values may themselves contain '=').
cookies_dict = {}
for pair in cookies_str.split("; "):
    name, _, value = pair.partition("=")
    cookies_dict[name] = value

# Send the POST request with payload, fake UA, and cookies attached.
r = requests.post(url=url, data=data, headers=headers, cookies=cookies_dict)

ret = json.loads(r.content.decode())
# First translated sentence.
print(ret["trans"][0]["dst"])

8-demo2.py

import requests, json

# Crawl the first four result pages of a Tieba forum and save each
# page's HTML to a numbered file (0.html, 50.html, 100.html, 150.html).
url = "https://tieba.baidu.com/f"

query_str = {
    "kw" : "美女",        # forum name
    "ie" : "utf-8",       # character encoding of the query
    "pn" : 0              # result offset; Tieba pages step by 50
}

headers = {
    "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1",
}

for page in range(0, 200, 50):
    # Final URL looks like https://tieba.baidu.com/f?kw=美女&ie=utf-8&pn=150
    query_str["pn"] = page

    # FIX: the original built `headers` but never sent it, so the request
    # went out with requests' default UA — pass it so the UA spoof works.
    res = requests.get(url, params=query_str, headers=headers)

    file_name = str(query_str["pn"]) + ".html"
    with open(file_name, 'wb') as myfile:
        myfile.write(res.content)
【版权声明】本文内容来自摩杜云社区用户原创、第三方投稿、转载,内容版权归原作者所有。本网站的目的在于传递更多信息,不拥有版权,亦不承担相应法律责任。如果您发现本社区中有涉嫌抄袭的内容,欢迎发送邮件进行举报,并提供相关证据,一经查实,本社区将立刻删除涉嫌侵权内容,举报邮箱: cloudbbs@moduyun.com

  1. 分享:
最后一次编辑于 2023年11月08日 0

暂无评论