Python crawler - Day1(PM)
  GspnLdKbz2va 2023年11月02日 42 0

1.set_cookie.py

import requests
import json

# Baidu sentence-translation endpoint.
url = "https://fanyi.baidu.com/basetrans"

# POST form fields. Replace token/sign with the values captured from
# your own browser's network tab — they are session-specific.
data = {
    "query": "happy every day",
    "from": "en",
    "to": "zh",
    "token": "0fa5f596f21877f6b1191dac4ba48d77",
    "sign": "342945.120976"
}

# Pretend to be a mobile Safari browser so the site serves this endpoint.
headers = {
    "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1",
}

# Raw Cookie header copied from the browser (replace with your own cookies).
cookies_str = "REALTIME_TRANS_SWITCH=1; FANYI_WORD_SWITCH=1; HISTORY_SWITCH=1; SOUND_SPD_SWITCH=1; SOUND_PREFER_SWITCH=1; Hm_lvt_64ecd82404c51e03dc91cb9e8c025574=1603939083,1604160877,1604284147,1604302762; BAIDUID=03FCB421FD4EC30952E327D9C34CF51B:FG=1; Hm_lvt_afd111fa62852d1f37001d1f980b6800=1604307516; OUTFOX_SEARCH_USER_ID_NCOO=50333195.918042414; Hm_lpvt_afd111fa62852d1f37001d1f980b6800=1604310327; Hm_lpvt_64ecd82404c51e03dc91cb9e8c025574=1604310327; yjs_js_security_passport=9b35601fbc205c08dbac5495e045e254019a3321_1604310333_js; ___rl__test__cookies=1604310333308"

# Turn "k1=v1; k2=v2; ..." into a dict, splitting each pair on the
# FIRST "=" only so cookie values containing "=" survive intact.
cookies_dict = {}
for pair in cookies_str.split("; "):
    name, _, value = pair.partition("=")
    cookies_dict[name] = value
# print(cookies_dict)  # inspect the parsed cookie dict if needed

# Send the POST request with form data, browser headers and cookies.
r = requests.post(url=url, data=data, headers=headers, cookies=cookies_dict)

# print(r.request.headers)  # request headers actually sent
# print(r.headers)  # response headers received

# Decode the JSON body and print the first translation result.
ret = json.loads(r.content.decode())
print(ret["trans"][0]["dst"])

2.get_cookie.py

import requests

url = "https://fanyi.baidu.com/basetrans"

# Mobile Safari User-Agent so the server treats us as a browser.
headers = {
    "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1",
}

res = requests.get(url, headers=headers)

# Headers that were actually sent, then the headers the server returned.
print(res.request.headers)
print(res.headers)                                                  # response headers

print("\n")

# Convert the response's CookieJar into a plain dict for easy reuse.
cookies = requests.utils.dict_from_cookiejar(res.cookies)
print(cookies)

3.redirect.py

import requests

# Legacy JD.com domain — the server answers with a redirect chain.
url = "http://www.360buy.com/"
headers = {
    "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1",
}

# Pass allow_redirects=False to stop requests from following redirects.
# r = requests.get(url, headers=headers, allow_redirects=False)
r = requests.get(url, headers=headers)
print(r.status_code)
print(r.url)                                                            # final URL after following redirects


print("历史请求过程信息:")
# r.history holds one Response object per intermediate redirect hop.
for hop in r.history:
    print(hop.status_code, hop.url, hop.headers)


print("\n\n最后一次的请求信息:")
print(r.status_code, r.url, r.headers)

4.ssl.py

import requests

# Host whose TLS certificate does not validate.
url = "https://chinasoftinc.com/owa"
# verify=False disables certificate verification (insecure — only for
# demos; requests will emit an InsecureRequestWarning).
response = requests.get(url, verify=False)
 

5.timeout.py

import requests

# url = "https://www.baidu.com"
url = "https://www.google.com"

# timeout=1 makes requests raise requests.exceptions.Timeout
# if the server cannot be reached / does not respond within 1 second.
r = requests.get(url=url, timeout=1)

6.retry_url.py

from retrying import retry
import requests
import time

num = 1


@retry(stop_max_attempt_number=3)
def __parse_url(url):
    """Fetch *url* once; the @retry decorator re-runs this up to 3 times
    when it raises (timeout or non-200 status)."""
    global num
    print("第%d次尝试" % num)
    num += 1

    headers = {
        "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1",
    }

    r = requests.get(url, headers=headers, timeout=1)
    # Any status other than 200 raises AssertionError, triggering a retry.
    assert r.status_code == 200
    return r


def parse_url(url):
    """Wrapper around __parse_url: returns the Response, or None after
    all retries are exhausted."""
    try:
        return __parse_url(url)
    except Exception as e:
        print("捕获异常", e)
        return None


if __name__ == '__main__':
    url = "https://chinasoftinc.com/owa"
    print("----开始----")
    r = parse_url(url=url)
    print("----结束----", "响应内容为:", r)

7.retry.py

from retrying import retry
import requests
import time

num = 1


@retry(stop_max_attempt_number=3)           # re-run up to 3 times on exception
def test():
    """Demo function: always ends in a TypeError so @retry's behavior
    (3 attempts, then the exception propagates) can be observed."""
    global num

    print("num=", num)
    num += 1
    time.sleep(1)

    # Deliberately broken: an int is not iterable, so this raises
    # TypeError every attempt and forces a retry.
    for i in 100:
        print("i", i)


if __name__ == "__main__":
    try:
        test()
    except Exception as ret:
        print("产生异常")
        print(ret)
    else:
        print("没有异常")

8.json.py

import requests

headers = {
    "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1"
}

# json= serializes the dict into a JSON request body and automatically
# sets the Content-Type: application/json header.
r = requests.post("https://fanyi.baidu.com/sug", headers=headers, json={"kw": "python"}, timeout=3)
print("请求头是:", r.request.headers)
print("请求体是:", r.request.body)

9.session.py

import requests

# A Session object persists cookies across requests automatically.
s = requests.Session()

headers = {
    "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1"
}

# First request: the server's Set-Cookie headers land in the session's jar.
r = s.get('https://www.baidu.com', headers=headers)
print("第一次请求的请求头为:", r.request.headers)
print("响应头:", r.headers)
print("设置的cookie为:", requests.utils.dict_from_cookiejar(r.cookies))

print("\n")

# Second request on the same session: the stored cookies are sent back
# even though we pass no headers this time.
r = s.get("https://www.baidu.com")
print("第二次请求的请求头为:", r.request.headers)

10.session_with_url.py

import requests

# 1. Create a session so the login cookie is carried to later requests.
s = requests.Session()

# 2. Log in — the server's cookie marks this session as authenticated.
url = "http://teacher-wang.com:8899/passport/login"
# Browser-like User-Agent.
headers = {
    "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1"
}
# Credentials sent as a JSON body.
payload = {
    "mobile": "13146060336",
    "password": "111111"
}

r = s.post(url=url, headers=headers, json=payload)
login_dict = r.json()

# Guard clause: bail out of the script when login did not succeed
# (errno == 0 means success in this API).
if login_dict.get("errno") != 0:
    print("----登录失败---")
    exit()  # stop the script
print("----登录成功---")

# 3. Fetch the news list with the logged-in session — the login cookie
# is attached automatically.
news_list_url = "http://teacher-wang.com:8899/newslist?page=1&cid=2&per_page=10"
r = s.get(url=news_list_url)
news_list_dict = r.json()
print("获取的新闻列表为", news_list_dict)

11.proxy.py

import requests

ip = "8.209.68.1"
port = 8089

# Build the proxy URLs once, then map both schemes onto them.
http_proxy = "http://%s:%d" % (ip, port)
https_proxy = "https://%s:%d" % (ip, port)
proxies = {
    "http": http_proxy,
    "https": https_proxy,
}

headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36"
}

url = "https://www.jixutao.com"

# Route the request through the proxy; timeout=1 fails fast on dead proxies.
r = requests.get(url=url, headers=headers, proxies=proxies, timeout=1)
print(r.text)
【版权声明】本文内容来自摩杜云社区用户原创、第三方投稿、转载,内容版权归原作者所有。本网站的目的在于传递更多信息,不拥有版权,亦不承担相应法律责任。如果您发现本社区中有涉嫌抄袭的内容,欢迎发送邮件进行举报,并提供相关证据,一经查实,本社区将立刻删除涉嫌侵权内容,举报邮箱: cloudbbs@moduyun.com

  1. 分享:
最后一次编辑于 2023年11月08日 0

暂无评论

推荐阅读
  9JCEeX0Eg8g4   2023年11月25日   37   0   0 ednpython
  eHipUjOuzYYH   2023年12月10日   26   0   0 mysqlsqlUser
  6tuRdFP5lxfF   2023年12月06日   28   0   0 linuxUser