What is the urllib library
urllib is Python's built-in HTTP request library. It contains four submodules:
- urllib.request: the request module
- urllib.error: the exception handling module
- urllib.parse: the URL parsing module
- urllib.robotparser: the robots.txt parsing module
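The rest of this article only exercises the first three submodules. As a minimal sketch of the fourth, urllib.robotparser can check whether a crawler is allowed to fetch a URL (what it prints depends on the site's robots.txt at the time you run it):
import urllib.robotparser
rp = urllib.robotparser.RobotFileParser()
rp.set_url('http://www.baidu.com/robots.txt')
rp.read()  # fetch and parse the robots.txt file
print(rp.can_fetch('*', 'http://www.baidu.com/'))  # may a generic crawler fetch this URL?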
Changes from Python 2
In Python 2.x there were two separate libraries, urllib and urllib2; in Python 3.x they were merged into a single urllib package, whose functionality is split across submodules:
- urllib.request
- urllib.parse
- urllib.error
Python 2:
import urllib2
response = urllib2.urlopen('http://www.baidu.com')
Python 3:
import urllib.request
response = urllib.request.urlopen('http://www.baidu.com')
The urlopen function
Function signature
# Function signature:
urllib.request.urlopen(url, data=None, [timeout, ]*, cafile=None, capath=None, cadefault=False, context=None)
# The main parameters are the request URL, the data payload and the timeout setting
Basic example
import urllib.request
response = urllib.request.urlopen('http://www.baidu.com')
print(response.read().decode('utf-8'))
# read() reads the body of the response, returning bytes
# decode('utf-8') decodes those bytes with UTF-8
# (conversely, encoding='utf-8' would encode a str back to bytes with UTF-8)
The output is the source code of the Baidu homepage; it is too long to paste here.
Carrying data in urlopen
import urllib.request
import urllib.parse
data = bytes(urllib.parse.urlencode({'word':'hello'}), encoding='utf-8')
# carry data in the urlopen call; this turns the request into a POST
# http://httpbin.org is a site for testing HTTP requests
response = urllib.request.urlopen('http://httpbin.org/post', data=data)
print(response.read())
b'{"args":{},"data":"","files":{},"form":{"word":"hello"},"headers":{"Accept-Encoding":"identity","Connection":"close","Content-Length":"10","Content-Type":"application/x-www-form-urlencoded","Host":"httpbin.org","User-Agent":"Python-urllib/3.6"},"json":null,"origin":"117.139.10.7","url":"http://httpbin.org/post"}\n'
Setting the timeout parameter
import urllib.request
# set the timeout parameter
response = urllib.request.urlopen('http://httpbin.org/get', timeout=1)
print(response.read())
b'{"args":{},"headers":{"Accept-Encoding":"identity","Connection":"close","Host":"httpbin.org","User-Agent":"Python-urllib/3.6"},"origin":"117.139.10.7","url":"http://httpbin.org/get"}\n'
If the server does not respond within the timeout, urlopen raises an exception:
import urllib.request
import urllib.error
import socket
try:
    response = urllib.request.urlopen('http://httpbin.org/get', timeout=0.1)
except urllib.error.URLError as e:
    if isinstance(e.reason, socket.timeout):
        print('TIME OUT')
TIME OUT
Response
Response type
import urllib.request
response = urllib.request.urlopen('http://www.python.org')
print(type(response))
<class 'http.client.HTTPResponse'>
Status code and response headers
import urllib.request
response = urllib.request.urlopen('https://www.python.org')
print(response.status) # print the status code
print(response.getheaders())
print(response.getheader('Server'))
200
[('Server', 'nginx'), ('Content-Type', 'text/html; charset=utf-8'), ('X-Frame-Options', 'SAMEORIGIN'), ('x-xss-protection', '1; mode=block'), ('X-Clacks-Overhead', 'GNU Terry Pratchett'), ('Via', '1.1 varnish'), ('Content-Length', '48703'), ('Accept-Ranges', 'bytes'), ('Date', 'Tue, 29 May 2018 10:57:05 GMT'), ('Via', '1.1 varnish'), ('Age', '932'), ('Connection', 'close'), ('X-Served-By', 'cache-iad2148-IAD, cache-lax8633-LAX'), ('X-Cache', 'HIT, HIT'), ('X-Cache-Hits', '1, 14'), ('X-Timer', 'S1527591425.014404,VS0,VE0'), ('Vary', 'Cookie'), ('Strict-Transport-Security', 'max-age=63072000; includeSubDomains')]
nginx
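getheaders() returns a list of (name, value) tuples; if a plain dict is handier, it can be converted directly, with the caveat that repeated headers (like the two Via entries above) collapse to a single value:
headers = dict(response.getheaders())  # duplicate header names keep only the last value
print(headers.get('Content-Type'))  # text/html; charset=utf-8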
Request
# the urlopen function cannot carry headers on its own
# the Request class can carry headers and other information
import urllib.request
request = urllib.request.Request('https://www.python.org')
response = urllib.request.urlopen(request)
print(response.code)
200
A Request carrying data and headers
from urllib import request, parse
url = 'http://httpbin.org/post'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'
}
form = {
    'name': 'germey'
}
data = bytes(parse.urlencode(form), encoding='utf-8')
req = request.Request(url=url, data=data, headers=headers, method='POST')
response = request.urlopen(req)
print(response.read().decode('utf-8'))
{"args":{},"data":"","files":{},"form":{"name":"germey"},"headers":{"Accept-Encoding":"identity","Connection":"close","Content-Length":"11","Content-Type":"application/x-www-form-urlencoded","Host":"httpbin.org","User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36"},"json":null,"origin":"117.139.10.7","url":"http://httpbin.org/post"}
Cookies
# cookies are used to keep login state across requests
import http.cookiejar, urllib.request
cookie = http.cookiejar.CookieJar()
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open('http://www.baidu.com')
for item in cookie:
    print(item.name + " = " + item.value)
BAIDUID = E2078AB08DD6A6FE566A65305B8E1944:FG=1
BIDUPSID = E2078AB08DD6A6FE566A65305B8E1944
H_PS_PSSID = 1460_21080_26430
PSTM = 1527595557
BDSVRTM = 0
BD_HOME = 0
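If every later urlopen call should carry the cookies automatically, the opener can be registered as the global default with install_opener; a small sketch:
import http.cookiejar, urllib.request
cookie = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookie))
urllib.request.install_opener(opener)  # make this opener the global default
response = urllib.request.urlopen('http://www.baidu.com')  # now goes through the cookie-aware opener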
Saving cookies to a file
import http.cookiejar, urllib.request
# save the cookie information to a text file
filename = 'cookie.txt'
cookie = http.cookiejar.MozillaCookieJar(filename) # the Mozilla/Netscape cookies.txt format
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open('http://www.baidu.com')
cookie.save(ignore_discard=True, ignore_expires=True) # use the save method to write the cookies out
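MozillaCookieJar is not the only file-backed jar: http.cookiejar.LWPCookieJar stores cookies in the libwww-perl format instead, with the same save/load interface (the file name here is just an example):
import http.cookiejar, urllib.request
cookie = http.cookiejar.LWPCookieJar('cookie_lwp.txt')  # example file name
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open('http://www.baidu.com')
cookie.save(ignore_discard=True, ignore_expires=True)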
Reading saved cookies with the load method
import http.cookiejar, urllib.request
cookie = http.cookiejar.MozillaCookieJar()
# use the load method to read the saved cookie information
# then send the cookies along with a new request
cookie.load('cookie.txt', ignore_discard=True, ignore_expires=True)
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open('http://www.baidu.com')
print(response.code)
200
Exception handling
from urllib import request, error
try:
    response = request.urlopen('http://www.reibang.com/index.html')
except error.URLError as e:
    print(e.reason)
Forbidden
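HTTPError is a subclass of URLError that additionally carries the status code and response headers; catching it before URLError separates HTTP-level failures from network-level ones. For example:
from urllib import request, error
try:
    response = request.urlopen('http://www.reibang.com/index.html')
except error.HTTPError as e:
    print(e.code, e.reason)  # e.g. 403 Forbidden
    print(e.headers)  # headers of the error response
except error.URLError as e:
    print(e.reason)  # network-level failure (DNS, connection refused, timeout...)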
Checking the specific exception type
import socket
import urllib.request
import urllib.error
try:
    response = urllib.request.urlopen('http://www.baidu.com', timeout=0.01)
except urllib.error.URLError as e: # check the specific type of the exception
    print(type(e.reason))
    if isinstance(e.reason, socket.timeout):
        print('TIME OUT')
<class 'socket.timeout'>
TIME OUT
urlparse
Function signature
# Function signature
urllib.parse.urlparse(urlstring, scheme='', allow_fragments=True)
# the scheme parameter gives the protocol type to assume when the URL itself has none
Example:
from urllib.parse import urlparse
result = urlparse('http://www.baidu.com/index.html;user?id=5#comment')
print(type(result), result)
<class 'urllib.parse.ParseResult'> ParseResult(scheme='http', netloc='www.baidu.com', path='/index.html', params='user', query='id=5', fragment='comment')
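ParseResult is a named tuple, so the six components can also be read as attributes; note that the scheme argument is only a default and takes effect when the URL itself carries no scheme. For example:
from urllib.parse import urlparse
result = urlparse('http://www.baidu.com/index.html;user?id=5#comment')
print(result.scheme, result.netloc, result.query)  # http www.baidu.com id=5
print(urlparse('www.baidu.com/index.html', scheme='https').scheme)  # https, since the URL has no scheme of its own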
urlunparse
# urlunparse is the inverse of urlparse: it assembles a URL from its components
from urllib.parse import urlunparse
data = ['http', 'www.baidu.com', 'index.html', 'user', 'id=5', 'comment']
print(urlunparse(data))
http://www.baidu.com/index.html;user?id=5#comment
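Because urlparse returns a six-element named tuple, its result can be fed straight back into urlunparse to round-trip a URL:
from urllib.parse import urlparse, urlunparse
url = 'http://www.baidu.com/index.html;user?id=5#comment'
print(urlunparse(urlparse(url)) == url)  # True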
urljoin
# used to join/resolve URLs
from urllib.parse import urljoin
# the second URL takes precedence: if it is absolute it replaces the first, otherwise it is resolved against it
print(urljoin('http://www.baidu.com', 'FAQ.html'))
print(urljoin('http://www.baidu.com', 'https://www.baidu.com/FAQ.html'))
print(urljoin('http://www.baidu.com', 'http://www.reibang.com/u/13b5875d0a63'))
print(urljoin('http://www.reibang.com', 'u/13b5875d0a63'))
http://www.baidu.com/FAQ.html
https://www.baidu.com/FAQ.html
http://www.reibang.com/u/13b5875d0a63
http://www.reibang.com/u/13b5875d0a63
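urljoin follows the RFC 3986 resolution rules rather than doing simple string splicing: a relative path is resolved against the base URL's directory, and a leading slash resets to the site root. Two more examples:
from urllib.parse import urljoin
print(urljoin('http://www.baidu.com/about/FAQ.html', 'contact.html'))  # http://www.baidu.com/about/contact.html
print(urljoin('http://www.baidu.com/about/FAQ.html', '/index.html'))  # http://www.baidu.com/index.html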
urlencode
# urlencode converts a dict into URL query parameters for a GET request
from urllib.parse import urlencode
params = {
    'name': 'gemmry',
    'age': 22
}
base_url = 'http://www.baidu.com?'
url = base_url + urlencode(params)
print(url)
http://www.baidu.com?name=gemmry&age=22
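A typical use is to build the query string for a GET request and fetch it with urlopen; httpbin echoes the parsed parameters back in its 'args' field. A small sketch:
import urllib.request
from urllib.parse import urlencode
params = urlencode({'name': 'gemmry', 'age': 22})
response = urllib.request.urlopen('http://httpbin.org/get?' + params)
print(response.read().decode('utf-8'))  # the 'args' field contains name and age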
Those are roughly the commonly used functions of the urllib library. Frankly it is still fairly cumbersome to use; the most pleasant HTTP request library is of course requests, which we'll look at next time.
Learn a little every day, improve a little every day.