1:頁面數(shù)據(jù)采集:urllib
from urllib import request

# Page scraping with urllib: fetch a page and dump its source.
url = 'http://www.baidu.com'
# Open the URL with a 1-second connection timeout.
response = request.urlopen(url, timeout=1)
# Read the response bytes and decode them as UTF-8 — effectively
# "view page source" for the fetched document.
print(response.read().decode('utf-8'))
2.get與post
from urllib import parse
from urllib import request

# GET vs POST with urllib.
# urlencode the payload dict, then encode to bytes; supplying `data=`
# switches urlopen from GET to POST.
data = bytes(parse.urlencode({'word': 'hello'}), encoding='utf8')
# print(data)
response = request.urlopen('http://httpbin.org/post', data=data)
print(response.read().decode('utf-8'))

# Without `data=`, urlopen issues a plain GET (1-second timeout here).
response2 = request.urlopen('http://httpbin.org/get', timeout=1)
print(response2.read())
# Import the submodules explicitly: a bare `import urllib` does NOT make
# `urllib.request` / `urllib.error` available by itself — the original code
# only worked because an earlier snippet had already imported them.
import urllib.error
import urllib.request
import socket

# Demonstrate timeout handling: a 0.1-second timeout is (almost) always
# too short, so the request is expected to fail.
try:
    response3 = urllib.request.urlopen('http://httpbin.org/get', timeout=0.1)
except urllib.error.URLError as e:
    # urlopen wraps the low-level socket timeout in URLError;
    # inspect e.reason to distinguish a timeout from other failures.
    if isinstance(e.reason, socket.timeout):
        print('TIME OUT')
3:http頭部信息模擬:
from urllib import request, parse

# Simulate browser HTTP headers on a POST request.
url = 'http://httpbin.org/post'
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, sdch",
"Accept-Language": "zh-CN,zh;q=0.8",
"Connection": "close",
"Cookie": "_gauges_unique_hour=1; _gauges_unique_day=1; _gauges_unique_month=1; _gauges_unique_year=1; _gauges_unique=1",
"Referer": "http://httpbin.org/",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36 LBBROWSER"
}
# Renamed from `dict`, which shadowed the builtin `dict` type.
form_data = {
    'name': 'value'
}
# Form body must be url-encoded bytes.
data = bytes(parse.urlencode(form_data), encoding='utf8')
# Build a Request so we can attach custom headers and force POST.
req = request.Request(url=url, data=data, headers=headers, method='POST')
response = request.urlopen(req)
print(response.read().decode('utf-8'))
4:requests庫的請求
# GET request with the requests library.
import requests

url = 'http://httpbin.org/get'
data = {'key': 'value', 'abc': 'xyz'}
# requests.get takes the dict as-is — no manual url-encoding needed;
# it is sent as the query string (same as passing it positionally).
response = requests.get(url, params=data)
# print(response.text)
# POST request with the requests library.
import requests

url = 'http://httpbin.org/post'
data = {'key': 'value', 'abc': 'xyz'}
# .post sends the dict as a form-encoded body.
response = requests.post(url, data=data)
# httpbin echoes the request back; parse the reply as JSON.
print(response.json())
5:爬個(gè)頁面獲取鏈接
import requests
import re

# Scrape a page and extract (link, title) pairs with a regex.
content = requests.get('http://www.cnu.cc/discoveryPage/hot-人像').text
# print(content)
# Target markup looks like:
#   <div class="grid-item work-thumbnail">
#   <a href="...">  ... class="title">TITLE</div>
#   <div class="author">AUTHOR</div>
# `.*?` is a non-greedy "zero or more of any character" — it stops at the
# first possible match. re.S makes `.` also match newlines, so the pattern
# can span multiple lines instead of matching line-by-line.
pattern = re.compile(r'<a href="(.*?)".*?title">(.*?)</div>', re.S)
# print(pattern)
# re.findall returns every (href, title) tuple found in the page.
results = re.findall(pattern, content)
print(results)
for result in results:
    url, name = result
    # Strip all whitespace from the title before printing.
    # Fixed: the pattern must be a raw string — '\s' in a plain string is an
    # invalid escape sequence (SyntaxWarning on modern Python).
    print(url, re.sub(r'\s', '', name))
(輸出結(jié)果截圖:image.png,原文配圖)
6.BeautifulSoup匹配html中的標(biāo)簽以及文本信息
# Sample HTML document, kept as a plain string literal.
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a class="sister" id="link1">Elsie</a>,
<a class="sister" id="link2">Lacie</a> and
<a class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
from bs4 import BeautifulSoup

# BeautifulSoup matches tags and their text; the 'lxml' parser must be
# installed separately. Attribute access returns the FIRST matching tag.
soup = BeautifulSoup(html_doc, 'lxml')
# print(soup.prettify())

# First <title> tag.
print(soup.title)
# Just the text inside <title>.
print(soup.title.string)
# First <p> tag.
print(soup.p)
# print(soup.p['class'])      # class attribute of the first <p>
# print(soup.a)               # first <a> tag
# print(soup.find_all('a'))   # every <a> tag in the document

# The tag whose id attribute is "link3".
print(soup.find(id="link3"))

# for link in soup.find_all('a'):
#     print(link.get('href'))  # href of each <a> tag
# print(soup.get_text())       # all text content of the document
7.爬取一個(gè)新聞網(wǎng)站: