# coding=utf-8
import requests
from bs4 import BeautifulSoup
import re
import os
import pymongo
import json
import pandas as pd
import numpy as np
import xlrd
import datetime
from pyecharts import Line,Grid,EffectScatter,Overlap
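# Library notes: the Line/Grid/EffectScatter/Overlap imports above exist only
# in the pyecharts 0.5.x API (they were removed in the 1.x rewrite), and
# reading .xlsx files with xlrd requires xlrd<2.0, since xlrd 2.x dropped
# .xlsx support (openpyxl is the usual replacement).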
def getPriceSoup_table(spiderDay):
    """Fetch up to three result pages for one day and merge their price tables."""
    soup_table = BeautifulSoup('', "lxml")
    for m in range(1, 4):
        url = ("http://www.hfzgncp.com.cn/index.php?m=content&c=index&a=lists"
               "&catid=59&sendtime=" + str(spiderDay) + "&page=" + str(m))
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) ' +
                                 'Gecko/20100101 Firefox/61.0'}
        r = requests.get(url, headers=headers)
        soup = BeautifulSoup(r.text, "lxml")
        table = soup.find('table', attrs={'class': 'h_list_table r_list_table'})
        if table is not None:  # pages without the table contribute nothing
            soup_table.append(table)
    if soup_table.find('td') is None:
        print('No aquatic-product price data for ' + str(spiderDay))
        return None
    print('Found aquatic-product price data for ' + str(spiderDay))
    return soup_table
def soup_tabletoDataFrame(soup_table):  # build a DataFrame to hold the table
    # collect the header texts
    columns = [x.text for x in soup_table.tr.findAll('th')]
    columns = [x.replace('\xa0', '') for x in columns]
    # table width
    width = len(columns)
    print('Number of header columns: ' + str(width))
    # keep each distinct row that actually carries data cells
    rows = []
    for row in soup_table.findAll('tr'):
        if row not in rows and row.find('td') is not None:
            rows.append(row)
    height = len(rows)  # number of product rows
    if height <= 0:
        return None
    print('Number of products: ' + str(height))
    df = pd.DataFrame(data=np.full((height, width), '', dtype='U'), columns=columns)
    # fill the DataFrame row by row
    for i in range(height):
        cells = rows[i].findAll('td')
        if len(cells) == width:
            # strip spaces and newlines from every cell
            df.iloc[i] = [cell.text.replace(' ', '').replace('\n', '') for cell in cells]
        else:
            # short rows (merged leading cells) are right-aligned into the last columns
            w = len(cells)
            df.iloc[i, width - w:] = [cell.text.replace(' ', '').replace('\n', '') for cell in cells]
    return df
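# Alternative sketch, not the approach used in this script: pandas.read_html
# can parse the same <table> in one call. This assumes lxml is installed and
# reuses the class name from getPriceSoup_table; it is unverified against the
# live page, so the manual cell-by-cell parser above stays the primary path.
def tableHtmlToDataFrame(html):
    # read_html returns one DataFrame per matching <table> and raises
    # ValueError when no table matches
    try:
        return pd.read_html(html, attrs={'class': 'h_list_table r_list_table'})[0]
    except ValueError:
        return None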
def onedayPriceSpider(spiderDay):
    # stub kept from the original; the row counting and the parsing of the
    # page's "sendtime" field sketched below were left commented out
    # height = len(table.findAll(lambda tag: tag.name == 'tr' and len(tag.findAll('td')) >= 1))
    # rows = [row for row in table.findAll('tr') if row.find('td') is not None]
    # print('rows:' + str(len(rows)))
    # sendtime = soup.find('input', attrs={'id': 'sendtime'})['value'].rstrip('/-')  # date of the data
    # sendtimeStr = re.sub("-", "", sendtime)
    pass
def getPriceLine(product_name, days):
    price_data = []
    price_date = []
    for i in range(days):
        spiderDay = datetime.date.today() - datetime.timedelta(days=i + 1)
        spiderDayStr = str(spiderDay)                # 2018-07-11 format
        sendtimeStr = re.sub("-", "", spiderDayStr)  # 20180711 format
        outputfilePath = "D:/xlsx/" + sendtimeStr + "水产品价格.xlsx"
        if os.path.exists(outputfilePath):
            ExcelFile = xlrd.open_workbook(outputfilePath)
            sheet = ExcelFile.sheet_by_index(0)
            columnIndex = None
            rowIndex = None
            # scan for the column headed '平均价' (average price) and the
            # row that carries the requested product name
            for j in range(sheet.ncols):
                for k in range(sheet.nrows):
                    if sheet.cell_value(k, j) == '平均价':
                        columnIndex = j
                        break
                    if sheet.cell_value(k, j) == product_name:
                        rowIndex = k
                        break
            if rowIndex is not None and columnIndex is not None:
                print(sheet.cell_value(rowIndex, columnIndex))
                price_data.append(sheet.cell_value(rowIndex, columnIndex))
                price_date.append(sendtimeStr)
    print(price_data)
    print(price_date)
    # oldest day first on the x-axis
    attr = price_date[::-1]
    v1 = price_data[::-1]
    line = Line(product_name + '价格走势图')
    line.add(product_name, attr, v1, is_smooth=True, mark_point=['max', 'min'],
             mark_line=["average"], yaxis_formatter="元")
    grid = Grid()
    grid.add(line, grid_top="10%")
    # create the chart folder if it does not exist yet
    if not os.path.exists('D:/价格走势图'):
        os.makedirs('D:/价格走势图')
        print('Created folder D:/价格走势图')
    else:
        print('Folder already exists')
    grid.render('D:/价格走势图/' + product_name + str(days) + '天价格走势图.html')
    print('Rendered the price-trend chart for ' + product_name)
    es = EffectScatter()
    es.add('', attr, v1, effect_scale=8)  # pulsing effect on the data points
    overlop = Overlap()
    overlop.add(line)  # the line must be added before the scatter
    overlop.add(es)
    overlop.render('./line-es01.html')
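# Hedged helper sketch (toFloatOrNone is hypothetical, not in the original):
# the cell values appended in getPriceLine come back from Excel as text, so
# the max/min/average markers operate on strings; converting with a helper
# like this before plotting would make them numeric.
def toFloatOrNone(value):
    # return the numeric value, or None when the cell text is not a number
    try:
        return float(value)
    except (TypeError, ValueError):
        return None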
# client = pymongo.MongoClient()
# # get a database
# db = client.priceSpider
# # create or fetch a collection under it
# account = db.price
# data = xlrd.open_workbook("D:/" + sendtimeStr + ".xlsx")
# table = data.sheets()[0]
# # use the first Excel row as the MongoDB field names
# rowstag = table.row_values(0)
# nrows = table.nrows
# ncols = table.ncols
# returnData = {}
# for i in range(1, nrows):
#     # zip the field names with each row into a dict and convert to JSON
#     returnData[i] = json.dumps(dict(zip(rowstag, table.row_values(i))))
#     # decode it back to restore the data
#     returnData[i] = json.loads(returnData[i])
#     account.insert(returnData[i])
# return daySpider
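# Runnable sketch of the commented MongoDB import above, assuming a local
# mongod on the default port and the workbook layout written by this script
# (header row followed by one row per product). insert_one replaces the
# deprecated Collection.insert used in the comments; the database/collection
# names follow the comments ("priceSpider" / "price").
def excelToMongo(xlsxPath):
    client = pymongo.MongoClient()        # localhost:27017 by default
    collection = client.priceSpider.price
    sheet = xlrd.open_workbook(xlsxPath).sheets()[0]
    rowstag = sheet.row_values(0)         # first row supplies the field names
    for i in range(1, sheet.nrows):
        # pair the field names with each data row and store it as one document
        collection.insert_one(dict(zip(rowstag, sheet.row_values(i))))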
if __name__ == "__main__":
    spiderDaynumber = 60
    for i in range(spiderDaynumber):
        spiderDay = datetime.date.today() - datetime.timedelta(days=i + 1)
        spiderDayStr = str(spiderDay)                # 2018-07-11 format
        sendtimeStr = re.sub("-", "", spiderDayStr)  # 20180711 format
        soup_table = getPriceSoup_table(spiderDay)
        if soup_table is not None:
            df = soup_tabletoDataFrame(soup_table)
            if df is not None:
                # create the output folder once, then always write the workbook
                if not os.path.exists('D:/xlsx'):
                    os.makedirs('D:/xlsx')
                    print('Created folder D:/xlsx')
                outputfilePath = "D:/xlsx/" + sendtimeStr + "水产品价格.xlsx"
                df.to_excel(outputfilePath)
        print('-------------------------------')
    getPriceLine('带鱼(大)', 59)
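# Expected artifacts after a full run, assuming the D: paths are writable:
#   D:/xlsx/<YYYYMMDD>水产品价格.xlsx - one workbook per day that had data
#   D:/价格走势图/带鱼(大)59天价格走势图.html - the rendered price-trend chart
#   ./line-es01.html - the line + effect-scatter overlay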