因為之前想過 如果每天早上微信能夠發(fā)送天氣預報給我寿桨,給我老婆多好罗晕,然后就動手看網(wǎng)上的教程做了一個可以定時發(fā)送天氣預報的程序仔掸,
最近又想折騰,做了一個更加詳細的版本,但是需要主動操作。
具體操作看圖吉殃。
4bf724fa5a27670491cee4fd3b7977c.png
d3d3306aa96844c06a4dd991a398e2b.png
b0d0d26535ef1647a74fb0ec5853b5b.png
部分代碼:
#coding=utf8
import requests
from requests import exceptions
from urllib.request import urlopen
from bs4 import BeautifulSoup
from urllib.parse import urlencode
from threading import Timer
import re
from wxpy import *
import schedule
import time
import http
import json
import datetime
import random
# --- wxpy bot setup (runs at import time; pops a console QR code to log in) ---
# cache_path=True reuses the cached login session; console_qr=1 prints the QR in the terminal.
bot = Bot(cache_path=True,console_qr = 1)
# Handle to the logged-in account itself.
myself = bot.self
# Persist stable puids for chats across restarts.
bot.enable_puid('wxpy_puid.pkl')
# Tuling chatbot backend; replace the key with your own.
tuling = Tuling(api_key='換成自己的圖片key')
# Chats this bot listens on (used by the @bot.register decorators below).
group = bot.groups().search(u'Test')
shgroup = bot.groups().search('伐木累??')
friends = bot.friends().search(u'Lie')
msgText = "Helo! 回復'功能'獲取對應功能\n1.天氣(例:蘇州天氣)\n2.今日nba(注:今日所有比賽結(jié)果)\n3.今日黃歷\n4.每日一句\n5.開啟機器人(關(guān)閉機器人)\n6.今日古詩詞\n7.每日閱讀\n8.歷史上的今天\n9.nba排名(注:當日東西部排名)\n10.新聞\n 1.頭條新聞\n 2.社會新聞\n 3.娛樂新聞\n 4.體育新聞\n11.星座運勢(例如:天秤座)" # menu text returned for any unrecognized reply
newText = "你可以這樣回復: \n1.頭條新聞\n2.社會新聞\n3.娛樂新聞\n4.體育新聞"
def get_now_weather(city):
    """Fetch current weather and air quality for *city* from the HeWeather S6
    API and format them into a chat-ready Chinese text report.

    :param city: city name understood by the HeWeather ``location`` parameter.
    :return: formatted report string, ending with a "send time" line.
    """
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.235'
    }
    weather_url = 'https://free-api.heweather.com/s6/weather/now?location=' + city + '&key=換成自己的聚合數(shù)據(jù)key'
    air_url = 'https://free-api.heweather.com/s6/air/now?parameters&location=' + city + '&key=換成自己的和風key'
    # Randomized request timeout in seconds.
    # NOTE(review): this sets the requests timeout; it does not delay or
    # throttle the requests, so it cannot hide the client from the site.
    timeout = random.choice(range(80, 180))
    weather_resp = requests.get(weather_url, headers=headers, timeout=timeout)
    air_resp = requests.get(air_url, headers=headers, timeout=timeout)

    # Both endpoints wrap their payload in a one-element 'HeWeather6' list.
    weather = weather_resp.json()['HeWeather6'][0]
    update = weather['update']
    now = weather['now']
    send_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    air = air_resp.json()['HeWeather6'][0]
    print(now)
    airnow = air['air_now_city']

    report_lines = [
        city + '實時天氣預報-',
        '更新時間:' + update['loc'],
        ' 當前天氣:' + now['cond_txt'],
        ' 當前溫度:' + now['tmp'] + '°C',
        ' 體感溫度:' + now['fl'] + '°C',
        ' 風向:' + now['wind_dir'] + ' ' + now['wind_sc'] + '級 ' + now['wind_spd'] + '公里/小時',
        ' 相對濕度:' + now['hum'] + '%',
        ' 降水量:' + now['pcpn'] + 'ml',
        ' 能見度:' + now['vis'] + '公里',
        ' 云量:' + now['cloud'],
        '-----------------------------------',
        '當前空氣質(zhì)量:',
        ' 空氣質(zhì)量指數(shù):' + airnow['aqi'],
        ' 主要污染物:' + airnow['main'],
        ' 空氣質(zhì)量:' + airnow['qlty'],
        ' 二氧化氮指數(shù):' + airnow['no2'],
        ' 二氧化硫指數(shù):' + airnow['so2'],
        ' 一氧化碳指數(shù):' + airnow['co'],
        ' pm10指數(shù):' + airnow['pm10'],
        ' pm25指數(shù):' + airnow['pm25'],
        ' 臭氧指數(shù):' + airnow['o3'],
    ]
    return '\n'.join(report_lines) + '\n' + '發(fā)送時間:' + send_time + '\n'
def get_news(type):
    """Fetch today's news of the given category from the juhe.cn toutiao API
    and format it as a chat-ready Chinese text list.

    :param type: category keyword accepted by the API
                 (e.g. 頭條 / 社會 / 娛樂 / 體育).
    :return: formatted news list as a single string.

    BUG FIX: the original ended the concatenation with a stray line
    continuation backslash immediately before ``return html``, which made the
    function a SyntaxError.  Also drops the unused ``item``/``obj`` locals.
    """
    header = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.235'
    }
    url = 'http://v.juhe.cn/toutiao/index?type=' + str(type) + '&key=換成自己的聚合數(shù)據(jù)key'
    # Request timeout in seconds.
    timeout = random.choice(range(80, 180))
    rep = requests.get(url, headers=header, timeout=timeout)
    articles = json.loads(rep.text)['result']['data']
    # Build with a list + join instead of quadratic string concatenation.
    parts = ['今日' + str(type) + '新聞:' + '\n']
    for i in articles:
        parts.append(
            '標題:' + i['title'] + '\n'
            + '鏈接:' + i['url'] + '\n'
            + '分類:' + i['category'] + '\n'
            + '來自:' + i['author_name'] + '\n'
            + '時間:' + i['date'] + '\n'
            + '-----------------------------------------------' + '\n' + '\n'
        )
    return ''.join(parts)
def get_star(name):
    """Fetch today's horoscope for the given zodiac sign from the juhe.cn
    constellation API and format it as a chat-ready Chinese text summary.

    :param name: Chinese zodiac-sign name, e.g. 天秤座.
    :return: formatted horoscope string.

    BUG FIX: the original ended the concatenation with a stray line
    continuation backslash immediately before ``return starhtml``, which made
    the function a SyntaxError.
    """
    header = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.235'
    }
    url = 'http://web.juhe.cn:8080/constellation/getAll?consName=' + str(name) + '&type=today&key=換成自己的聚合數(shù)據(jù)key'
    # Request timeout in seconds.
    timeout = random.choice(range(80, 180))
    rep = requests.get(url, headers=header, timeout=timeout)
    data = json.loads(rep.text)
    # Horoscope fields are top-level keys of the response object.
    return ('今日' + str(name) + '運勢:' + '\n'
            + ' 綜合指數(shù):' + data['all'] + '\n'
            + ' 幸運色:' + data['color'] + '\n'
            + ' 健康指數(shù):' + data['health'] + '\n'
            + ' 愛情指數(shù):' + data['love'] + '\n'
            + ' 財運指數(shù):' + data['money'] + '\n'
            + ' 速配星座:' + data['QFriend'] + '\n'
            + ' 工作指數(shù):' + data['work'] + '\n'
            + ' 今日概述:' + data['summary'] + '\n')
def get_nba():
    """Scrape today's NBA results from m.hupu.com/nba/game and return them as
    a chat-ready Chinese text summary (away score home (status) per line)."""
    page = urlopen('https://m.hupu.com/nba/game')
    dom = BeautifulSoup(page, 'html.parser')
    today = dom.find('section', class_="match-today")
    lines = ['今日NBA比賽結(jié)果:', '']
    for match in today.find_all('a', class_='match-wrap'):
        away = match.find('div', class_='away-team').span.get_text()
        score = match.find('strong', class_='').span.get_text()
        home = match.find('div', class_='home-team').span.get_text()
        status = match.find('div', class_='match-status-txt').get_text()
        lines.append(away + ' ' + score + ' ' + home + ' (' + status + ')')
    return '\n'.join(lines) + '\n'
def get_rank():
    """Scrape the current NBA East/West conference standings from
    m.hupu.com/nba/stats and format them as a chat-ready Chinese text table.

    :return: formatted standings string (East section, separator, West section).

    Fixes: the original shadowed the builtin ``list`` with a local variable
    and duplicated the row-formatting loop verbatim for each conference;
    the shared logic now lives in ``_format_conference``.
    """
    soup = BeautifulSoup(urlopen('https://m.hupu.com/nba/stats'), 'html.parser')
    # Both conference tables carry class "weast": index 0 = East, 1 = West.
    conferences = soup.find_all('li', class_="weast")
    east_html = _format_conference(
        conferences[0], '今日NBA東部排名:(1.排名 2.球隊 3.勝負 4.勝負差 5.最近情況)')
    west_html = _format_conference(
        conferences[1], '今日NBA西部排名:(1.排名 2.球隊 3.勝負 4.勝負差 5.最近情況)')
    separator = '\n' + '\n' + '---------------------------------------------' + '\n' + '\n'
    return east_html + separator + west_html

def _format_conference(section, title):
    """Format one conference's <li> section into the standings text block.

    Helper for :func:`get_rank`; one output line per team.
    """
    out = title + '\n' + '\n'
    for tag in section.find_all('li', class_=''):
        cols = tag.find('p', class_='right-data').find_all('span')
        out = out + tag.find('span', class_='rank').get_text() + '. ' \
            + tag.find('div', class_='').h1.get_text() + ' ' \
            + cols[0].get_text() + ' ' + cols[1].get_text() + ' ' + cols[2].get_text() + '\n'
    return out
def invite(user):
    """Invite *user* into the 'cc' group via a WeChat group invitation.

    :param user: wxpy user/friend object to invite.

    Fix: removed the leftover debug ``print('4')``.
    """
    cc_group = bot.groups().search('cc')
    # use_invitation sends a proper invite instead of adding directly.
    cc_group[0].add_members(user, use_invitation=True)
@bot.register(msg_types=FRIENDS)
@bot.register(group)
@bot.register(shgroup,TEXT)
@bot.register(friends)
喜歡的可以加微信 wex_5201314,驗證信息【py】,拉你進群體驗;或者關(guān)注公眾號【故事膠片】獲取源碼,
或者直接掃描二維碼
WeChat Image_20181221112044.jpg