Crawling All Weibo Posts from a Specified Account

This comes from a project on GitHub — a medium-sized one, so there is quite a lot of code to walk through. Given a user id and a login cookie, the script walks every page of the account's timeline on weibo.cn, extracts each post's text, pictures, location, publish time, publish tool, and like/retweet/comment counts, writes the results to csv and txt files, and can optionally download the original images.

#!/usr/bin/env python
# -*- coding: UTF-8 -*-

import codecs
import csv
import os
import random
import re
import sys
import traceback
from collections import OrderedDict
from datetime import datetime, timedelta
from time import sleep

import requests
from lxml import etree
from tqdm import tqdm


class Weibo(object):
    cookie = {'Cookie': 'your cookie'}  # replace 'your cookie' with your own login cookie
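    # The cookie can typically be obtained by logging in at https://weibo.cn
    # in a browser and copying the Cookie request header from the developer
    # tools; the project README describes the exact steps.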

    def __init__(self, user_id, filter=0, pic_download=0):
        """Initialize the Weibo class"""
        if not isinstance(user_id, int):
            sys.exit(u'user_id should be a number, please re-enter')
        if filter != 0 and filter != 1:
            sys.exit(u'filter should be 0 or 1, please re-enter')
        if pic_download != 0 and pic_download != 1:
            sys.exit(u'pic_download should be 0 or 1, please re-enter')
        self.user_id = user_id  # numeric user id, e.g. the user nicknamed "Dear-迪麗熱巴" has id 1669879400
        self.filter = filter  # 0 (default) crawls all posts, 1 crawls only original (non-retweeted) posts
        self.pic_download = pic_download  # 0 (default) skips downloading original pictures, 1 downloads them
        self.nickname = ''  # user nickname, e.g. "Dear-迪麗熱巴"
        self.weibo_num = 0  # total number of the user's posts
        self.got_num = 0  # number of posts crawled so far
        self.following = 0  # number of accounts the user follows
        self.followers = 0  # number of the user's followers
        self.weibo = []  # all crawled post records

    def deal_html(self, url):
        """Fetch a url and parse the html"""
        try:
            html = requests.get(url, cookies=self.cookie).content
            selector = etree.HTML(html)
            return selector
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()
    def deal_garbled(self, info):
        """Extract text, working around console encoding problems"""
        try:
            # remove zero-width spaces (u'\u200b') and drop any characters
            # the console cannot encode
            info = (info.xpath('string(.)').replace(u'\u200b', '').encode(
                sys.stdout.encoding, 'ignore').decode(sys.stdout.encoding))
            return info
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def get_nickname(self):
        """Get the user's nickname"""
        try:
            url = 'https://weibo.cn/%d/info' % (self.user_id)
            selector = self.deal_html(url)
            nickname = selector.xpath('//title/text()')[0]
            # the page title is '<nickname>的微博'; drop the 3-character suffix
            self.nickname = nickname[:-3]
            if self.nickname == u'登錄 - 新' or self.nickname == u'新浪':
                sys.exit(u'Cookie is wrong or expired; please get a new one as described in the README')
            print(u'Nickname: ' + self.nickname)
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def get_user_info(self, selector):
        """Get the user's nickname and post/following/follower counts"""
        try:
            self.get_nickname()  # get the user's nickname
            user_info = selector.xpath("//div[@class='tip2']/*/text()")

            # each entry looks like u'微博[1234]'; strip the 2-character label
            # plus the opening bracket (3 characters) and the closing bracket
            self.weibo_num = int(user_info[0][3:-1])
            print(u'Posts: ' + str(self.weibo_num))

            self.following = int(user_info[1][3:-1])
            print(u'Following: ' + str(self.following))

            self.followers = int(user_info[2][3:-1])
            print(u'Followers: ' + str(self.followers))
            print('*' * 100)
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def get_page_num(self, selector):
        """Get the total number of timeline pages"""
        try:
            # the hidden input named 'mp' holds the page count; it is absent
            # when there is only one page
            if selector.xpath("//input[@name='mp']") == []:
                page_num = 1
            else:
                page_num = int(
                    selector.xpath("//input[@name='mp']")[0].attrib['value'])
            return page_num
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def get_long_weibo(self, weibo_link):
        """Get the full text of a long original post"""
        try:
            selector = self.deal_html(weibo_link)
            info = selector.xpath("//div[@class='c']")[1]
            wb_content = self.deal_garbled(info)
            wb_time = info.xpath("//span[@class='ct']/text()")[0]
            # keep the text between the leading 'nickname:' and the timestamp
            weibo_content = wb_content[wb_content.find(':') +
                                       1:wb_content.rfind(wb_time)]
            return weibo_content
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def get_original_weibo(self, info, weibo_id):
        """Get the content of an original post"""
        try:
            weibo_content = self.deal_garbled(info)
            # cut off the footer, which starts at the like count (u'贊')
            weibo_content = weibo_content[:weibo_content.rfind(u'贊')]
            a_text = info.xpath('div//a/text()')
            # a u'全文' ("full text") link means the post is truncated
            if u'全文' in a_text:
                weibo_link = 'https://weibo.cn/comment/' + weibo_id
                wb_content = self.get_long_weibo(weibo_link)
                if wb_content:
                    weibo_content = wb_content
            return weibo_content
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def get_long_retweet(self, weibo_link):
        """Get the full text of a long retweeted post"""
        try:
            wb_content = self.get_long_weibo(weibo_link)
            # cut off everything from the u'原文轉(zhuǎn)發(fā)' ("retweet of original") marker on
            weibo_content = wb_content[:wb_content.rfind(u'原文轉(zhuǎn)發(fā)')]
            return weibo_content
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def get_retweet(self, info, weibo_id):
        """Get the content of a retweeted post"""
        try:
            original_user = info.xpath("div/span[@class='cmt']/a/text()")
            if not original_user:
                wb_content = u'轉(zhuǎn)發(fā)微博已被刪除'  # "the retweeted post has been deleted"
                return wb_content
            else:
                original_user = original_user[0]
            wb_content = self.deal_garbled(info)
            # the combined text contains u'贊' in both the retweeted post's
            # footer and the retweet's own footer, so trim at it twice
            wb_content = wb_content[wb_content.find(':') +
                                    1:wb_content.rfind(u'贊')]
            wb_content = wb_content[:wb_content.rfind(u'贊')]
            a_text = info.xpath('div//a/text()')
            if u'全文' in a_text:
                weibo_link = 'https://weibo.cn/comment/' + weibo_id
                weibo_content = self.get_long_retweet(weibo_link)
                if weibo_content:
                    wb_content = weibo_content
            retweet_reason = self.deal_garbled(info.xpath('div')[-1])
            retweet_reason = retweet_reason[:retweet_reason.rindex(u'贊')]
            wb_content = (retweet_reason + '\n' + u'原始用戶: ' + original_user +
                          '\n' + u'轉(zhuǎn)發(fā)內(nèi)容: ' + wb_content)
            return wb_content
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def is_original(self, info):
        """Decide whether a post is original (not a retweet)"""
        is_original = info.xpath("div/span[@class='cmt']")
        # retweets carry extra span[@class='cmt'] nodes (retweet header,
        # retweet reason, ...), so more than three marks a retweet
        if len(is_original) > 3:
            return False
        else:
            return True

    def get_weibo_content(self, info, is_original):
        """Get the post content"""
        try:
            # the post div's id looks like 'M_<weibo id>'; strip the 'M_' prefix
            weibo_id = info.xpath('@id')[0][2:]
            if is_original:
                weibo_content = self.get_original_weibo(info, weibo_id)
            else:
                weibo_content = self.get_retweet(info, weibo_id)
            print(weibo_content)
            return weibo_content
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def get_publish_place(self, info):
        """Get the post's publish location"""
        try:
            div_first = info.xpath('div')[0]
            a_list = div_first.xpath('a')
            publish_place = u'無(wú)'  # u'無(wú)' ("none") is the sentinel for no location
            for a in a_list:
                if ('place.weibo.com' in a.xpath('@href')[0]
                        and a.xpath('text()')[0] == u'顯示地圖'):
                    weibo_a = div_first.xpath("span[@class='ctt']/a")
                    if len(weibo_a) >= 1:
                        publish_place = weibo_a[-1]
                        # when the last link is a video link (u'視頻'), the
                        # location is the second-to-last one
                        if (u'視頻' == div_first.xpath(
                                "span[@class='ctt']/a/text()")[-1][-2:]):
                            if len(weibo_a) >= 2:
                                publish_place = weibo_a[-2]
                            else:
                                publish_place = u'無(wú)'
                        publish_place = self.deal_garbled(publish_place)
                        break
            print(u'Publish place: ' + publish_place)
            return publish_place
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def get_publish_time(self, info):
        """Get the post's publish time"""
        try:
            str_time = info.xpath("div/span[@class='ct']")
            str_time = self.deal_garbled(str_time[0])
            publish_time = str_time.split(u'來自')[0]
            if u'剛剛' in publish_time:  # "just now"
                publish_time = datetime.now().strftime('%Y-%m-%d %H:%M')
            elif u'分鐘' in publish_time:  # "N minutes ago"
                minute = publish_time[:publish_time.find(u'分鐘')]
                minute = timedelta(minutes=int(minute))
                publish_time = (datetime.now() -
                                minute).strftime('%Y-%m-%d %H:%M')
            elif u'今天' in publish_time:  # "today HH:MM"
                today = datetime.now().strftime('%Y-%m-%d')
                time = publish_time[3:]
                publish_time = today + ' ' + time
            elif u'月' in publish_time:  # "MM月DD日 HH:MM"
                year = datetime.now().strftime('%Y')
                month = publish_time[0:2]
                day = publish_time[3:5]
                time = publish_time[7:12]
                publish_time = year + '-' + month + '-' + day + ' ' + time
            else:
                publish_time = publish_time[:16]
            print(u'Publish time: ' + publish_time)
            return publish_time
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()
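
    # Examples of the raw timestamps get_publish_time normalizes (formats as
    # observed on weibo.cn; the concrete values are illustrative):
    #   u'剛剛'            -> the current time, e.g. '2019-06-01 12:00'
    #   u'5分鐘前'         -> now minus 5 minutes
    #   u'今天 08:30'      -> '2019-06-01 08:30'
    #   u'02月14日 20:05'  -> '2019-02-14 20:05'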

    def get_publish_tool(self, info):
        """Get the tool the post was published from"""
        try:
            str_time = info.xpath("div/span[@class='ct']")
            str_time = self.deal_garbled(str_time[0])
            # the part after u'來自' ("from") names the client, e.g. a phone model
            if len(str_time.split(u'來自')) > 1:
                publish_tool = str_time.split(u'來自')[1]
            else:
                publish_tool = u'無(wú)'
            print(u'Publish tool: ' + publish_tool)
            return publish_tool
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def get_weibo_footer(self, info):
        """Get the post's like, retweet and comment counts"""
        try:
            footer = {}
            pattern = r'\d+'
            str_footer = info.xpath('div')[-1]
            str_footer = self.deal_garbled(str_footer)
            str_footer = str_footer[str_footer.rfind(u'贊'):]
            weibo_footer = re.findall(pattern, str_footer, re.M)

            up_num = int(weibo_footer[0])
            print(u'Likes: ' + str(up_num))
            footer['up_num'] = up_num

            retweet_num = int(weibo_footer[1])
            print(u'Retweets: ' + str(retweet_num))
            footer['retweet_num'] = retweet_num

            comment_num = int(weibo_footer[2])
            print(u'Comments: ' + str(comment_num))
            footer['comment_num'] = comment_num
            return footer
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()
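
    # The trailing part of each post block looks roughly like
    # u'贊[8] 轉(zhuǎn)發(fā)[2] 評(píng)論[5] 收藏' (the exact labels are an assumption about
    # the weibo.cn markup); re.findall(r'\d+') on the slice starting at the
    # rightmost u'贊' then yields ['8', '2', '5'], which get_weibo_footer
    # reads in that order.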

    def extract_picture_urls(self, info, weibo_id):
        """Extract the urls of a post's original pictures"""
        try:
            a_list = info.xpath('div/a/@href')
            first_pic = 'https://weibo.cn/mblog/pic/' + weibo_id + '?rl=0'
            all_pic = 'https://weibo.cn/mblog/picAll/' + weibo_id + '?rl=1'
            if first_pic in a_list:
                if all_pic in a_list:
                    selector = self.deal_html(all_pic)
                    preview_picture_list = selector.xpath('//img/@src')
                    picture_list = [
                        p.replace('/thumb180/', '/large/')
                        for p in preview_picture_list
                    ]
                    picture_urls = ','.join(picture_list)
                else:
                    if info.xpath('.//img/@src'):
                        preview_picture = info.xpath('.//img/@src')[-1]
                        picture_urls = preview_picture.replace(
                            '/wap180/', '/large/')
                    else:
                        sys.exit(
                            u"The crawling account may have 'show pictures' "
                            u"disabled; please go to "
                            u"'https://weibo.cn/account/customize/pic' "
                            u"and set it to 'show'")
            else:
                picture_urls = '無(wú)'
            return picture_urls
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()
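
    # weibo.cn links a single-picture post to .../mblog/pic/<id> and a
    # multi-picture post additionally to .../mblog/picAll/<id>, which is why
    # extract_picture_urls branches on which of the two urls appears among
    # the post's links: picAll present -> fetch that page and collect every
    # image, otherwise take the single inline preview.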

    def get_picture_urls(self, info, is_original):
        """Get the urls of the post's original pictures"""
        try:
            weibo_id = info.xpath('@id')[0][2:]
            picture_urls = {}
            if is_original:
                original_pictures = self.extract_picture_urls(info, weibo_id)
                picture_urls['original_pictures'] = original_pictures
                if not self.filter:
                    picture_urls['retweet_pictures'] = '無(wú)'
            else:
                # for a retweet, resolve the id of the retweeted post first
                retweet_url = info.xpath("div/a[@class='cc']/@href")[0]
                retweet_id = retweet_url.split('/')[-1].split('?')[0]
                retweet_pictures = self.extract_picture_urls(info, retweet_id)
                picture_urls['retweet_pictures'] = retweet_pictures
                a_list = info.xpath('div[last()]/a/@href')
                original_picture = '無(wú)'
                for a in a_list:
                    if a.endswith(('.gif', '.jpeg', '.jpg', '.png')):
                        original_picture = a
                        break
                picture_urls['original_pictures'] = original_picture
            return picture_urls
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def download_pic(self, url, pic_path):
        """Download a single picture"""
        try:
            p = requests.get(url)
            with open(pic_path, 'wb') as f:
                f.write(p.content)
        except Exception as e:
            # record urls that failed to download
            error_file = self.get_filepath(
                'img') + os.sep + 'not_downloaded_pictures.txt'
            with open(error_file, 'ab') as f:
                url = url + '\n'
                f.write(url.encode(sys.stdout.encoding))
            print('Error: ', e)
            traceback.print_exc()

    def download_pictures(self):
        """Download the posts' pictures"""
        try:
            print(u'Starting picture download')
            img_dir = self.get_filepath('img')
            for w in tqdm(self.weibo, desc=u'Picture download progress'):
                if w['original_pictures'] != '無(wú)':
                    # name files '<YYYYMMDD>_<weibo id>[_<n>].<ext>'
                    pic_prefix = w['publish_time'][:10].replace(
                        '-', '') + '_' + w['id']
                    if ',' in w['original_pictures']:
                        w['original_pictures'] = w['original_pictures'].split(
                            ',')
                        for j, url in enumerate(w['original_pictures']):
                            pic_suffix = url[url.rfind('.'):]
                            pic_name = pic_prefix + '_' + str(j +
                                                              1) + pic_suffix
                            pic_path = img_dir + os.sep + pic_name
                            self.download_pic(url, pic_path)
                    else:
                        pic_suffix = w['original_pictures'][
                            w['original_pictures'].rfind('.'):]
                        pic_name = pic_prefix + pic_suffix
                        pic_path = img_dir + os.sep + pic_name
                        self.download_pic(w['original_pictures'], pic_path)
            print(u'Picture download finished, saved to:')
            print(img_dir)
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def get_one_weibo(self, info):
        """Get all information of one post"""
        try:
            weibo = OrderedDict()
            is_original = self.is_original(info)
            if (not self.filter) or is_original:
                weibo['id'] = info.xpath('@id')[0][2:]
                weibo['content'] = self.get_weibo_content(
                    info, is_original)  # post content
                picture_urls = self.get_picture_urls(info, is_original)
                weibo['original_pictures'] = picture_urls[
                    'original_pictures']  # urls of original pictures
                if not self.filter:
                    weibo['retweet_pictures'] = picture_urls[
                        'retweet_pictures']  # urls of retweeted pictures
                    weibo['original'] = is_original  # whether the post is original
                weibo['publish_place'] = self.get_publish_place(info)  # publish place
                weibo['publish_time'] = self.get_publish_time(info)  # publish time
                weibo['publish_tool'] = self.get_publish_tool(info)  # publish tool
                footer = self.get_weibo_footer(info)
                weibo['up_num'] = footer['up_num']  # like count
                weibo['retweet_num'] = footer['retweet_num']  # retweet count
                weibo['comment_num'] = footer['comment_num']  # comment count
            else:
                weibo = None
            return weibo
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def get_one_page(self, page):
        """Get all posts on the given page"""
        try:
            url = 'https://weibo.cn/u/%d?page=%d' % (self.user_id, page)
            selector = self.deal_html(url)
            info = selector.xpath("//div[@class='c']")
            is_exist = info[0].xpath("div/span[@class='ctt']")
            if is_exist:
                # the last two div[@class='c'] blocks are page chrome
                # (pagination etc.), not posts, hence len(info) - 2
                for i in range(0, len(info) - 2):
                    weibo = self.get_one_weibo(info[i])
                    if weibo:
                        self.weibo.append(weibo)
                        self.got_num += 1
                        print('-' * 100)
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def get_filepath(self, type):
        """Get the path of a result file"""
        try:
            file_dir = os.path.split(os.path.realpath(
                __file__))[0] + os.sep + 'weibo' + os.sep + self.nickname
            if type == 'img':
                file_dir = file_dir + os.sep + 'img'
            if not os.path.isdir(file_dir):
                os.makedirs(file_dir)
            if type == 'img':
                return file_dir
            file_path = file_dir + os.sep + '%d' % self.user_id + '.' + type
            return file_path
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()
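
    # Resulting layout next to the script (using the sample account above):
    #   weibo/Dear-迪麗熱巴/1669879400.csv
    #   weibo/Dear-迪麗熱巴/1669879400.txt
    #   weibo/Dear-迪麗熱巴/img/        <- downloaded pictures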

    def write_csv(self, wrote_num):
        """Write the crawled posts to a csv file"""
        try:
            result_headers = [
                'weibo id',
                'content',
                'original picture urls',
                'publish place',
                'publish time',
                'publish tool',
                'likes',
                'retweets',
                'comments',
            ]
            if not self.filter:
                result_headers.insert(3, 'retweeted picture urls')
                result_headers.insert(4, 'is original')
            result_data = [w.values() for w in self.weibo][wrote_num:]
            if sys.version < '3':  # python2.x
                reload(sys)
                sys.setdefaultencoding('utf-8')
                with open(self.get_filepath('csv'), 'ab') as f:
                    writer = csv.writer(f)
                    if wrote_num == 0:
                        # write a BOM once so that Excel detects the encoding
                        f.write(codecs.BOM_UTF8)
                        writer.writerows([result_headers])
                    writer.writerows(result_data)
            else:  # python3.x
                # 'utf-8-sig' writes the same BOM; use plain 'utf-8' on later
                # appends so no second BOM lands in the middle of the file
                file_encoding = 'utf-8-sig' if wrote_num == 0 else 'utf-8'
                with open(self.get_filepath('csv'),
                          'a',
                          encoding=file_encoding,
                          newline='') as f:
                    writer = csv.writer(f)
                    if wrote_num == 0:
                        writer.writerows([result_headers])
                    writer.writerows(result_data)
            print(u'%d posts written to csv, saved to:' % self.got_num)
            print(self.get_filepath('csv'))
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def write_txt(self, wrote_num):
        """Write the crawled posts to a txt file"""
        try:
            temp_result = []
            if wrote_num == 0:
                if self.filter:
                    result_header = u'\n\nOriginal posts:\n'
                else:
                    result_header = u'\n\nPosts:\n'
                result_header = (u'User information\nNickname: ' +
                                 self.nickname + u'\nUser id: ' +
                                 str(self.user_id) + u'\nPosts: ' +
                                 str(self.weibo_num) + u'\nFollowing: ' +
                                 str(self.following) + u'\nFollowers: ' +
                                 str(self.followers) + result_header)
                temp_result.append(result_header)
            for i, w in enumerate(self.weibo[wrote_num:]):
                temp_result.append(
                    str(wrote_num + i + 1) + ':' + w['content'] + '\n' +
                    u'Publish place: ' + w['publish_place'] + '\n' +
                    u'Publish time: ' + w['publish_time'] + '\n' +
                    u'Likes: ' + str(w['up_num']) +
                    u'   Retweets: ' + str(w['retweet_num']) +
                    u'   Comments: ' + str(w['comment_num']) + '\n' +
                    u'Publish tool: ' + w['publish_tool'] + '\n\n')
            result = ''.join(temp_result)
            with open(self.get_filepath('txt'), 'ab') as f:
                f.write(result.encode(sys.stdout.encoding))
            print(u'%d posts written to txt, saved to:' % self.got_num)
            print(self.get_filepath('txt'))
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def write_file(self, wrote_num):
        """Write any posts not yet written to the result files"""
        if self.got_num > wrote_num:
            self.write_csv(wrote_num)
            self.write_txt(wrote_num)

    def get_weibo_info(self):
        """Crawl the posts"""
        try:
            url = 'https://weibo.cn/u/%d' % (self.user_id)
            selector = self.deal_html(url)
            self.get_user_info(selector)  # get nickname and post/following/follower counts
            page_num = self.get_page_num(selector)  # get the total number of pages
            wrote_num = 0
            page1 = 0
            random_pages = random.randint(1, 5)
            for page in tqdm(range(1, page_num + 1), desc=u'Progress'):
                self.get_one_page(page)  # get all posts on the current page

                if page % 20 == 0:  # write to file every 20 pages
                    self.write_file(wrote_num)
                    wrote_num = self.got_num

                # Random waits reduce the risk of being rate-limited. Crawling
                # too fast gets the account temporarily blocked (the block
                # lifts after a while); sleeping at random intervals mimics a
                # human user. By default the crawler sleeps 6-10 seconds after
                # every 1-5 pages; increase the sleep time if you still get
                # blocked.
                if page - page1 == random_pages and page < page_num:
                    sleep(random.randint(6, 10))
                    page1 = page
                    random_pages = random.randint(1, 5)

            self.write_file(wrote_num)  # write the remaining (fewer than 20) pages
            if not self.filter:
                print(u'Crawled ' + str(self.got_num) + u' posts in total')
            else:
                print(u'Crawled ' + str(self.got_num) + u' original posts in total')
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def start(self):
        """Run the crawler"""
        try:
            self.get_weibo_info()
            print(u'Crawling finished')
            print('*' * 100)
            if self.pic_download == 1:
                self.download_pictures()
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()


def main():
    try:
        # Example usage: pass in a user id; all results are stored on the wb instance
        user_id = 1669879400  # can be any valid user id (except the crawling account's own)
        filter = 1  # 0 crawls all posts (original + retweets), 1 crawls only original posts
        pic_download = 1  # 0 skips downloading the original pictures, 1 downloads them
        wb = Weibo(user_id, filter, pic_download)  # create a Weibo instance
        wb.start()  # crawl the posts
        print(u'Nickname: ' + wb.nickname)
        print(u'Total posts: ' + str(wb.weibo_num))
        print(u'Following: ' + str(wb.following))
        print(u'Followers: ' + str(wb.followers))
        if wb.weibo:
            print(u'Latest/pinned post: ' + wb.weibo[0]['content'])
            print(u'Latest/pinned post place: ' + wb.weibo[0]['publish_place'])
            print(u'Latest/pinned post time: ' + wb.weibo[0]['publish_time'])
            print(u'Latest/pinned post likes: ' + str(wb.weibo[0]['up_num']))
            print(u'Latest/pinned post retweets: ' + str(wb.weibo[0]['retweet_num']))
            print(u'Latest/pinned post comments: ' + str(wb.weibo[0]['comment_num']))
            print(u'Latest/pinned post tool: ' + wb.weibo[0]['publish_tool'])
    except Exception as e:
        print('Error: ', e)
        traceback.print_exc()


if __name__ == '__main__':
    main()
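
A quick way to check that your cookie works before launching the full crawl is to fetch a single timeline page with the same primitives the class is built on (requests + lxml). This is a minimal sketch, not part of the project: the id is the sample account used throughout this article, and the cookie placeholder must be replaced with your own.

import requests
from lxml import etree

cookie = {'Cookie': 'your cookie'}  # same placeholder as in the Weibo class
user_id = 1669879400  # sample account used throughout this article

html = requests.get('https://weibo.cn/u/%d?page=1' % user_id,
                    cookies=cookie).content
selector = etree.HTML(html)
# a valid cookie yields the profile title; an expired one yields a login page
print('title:', selector.xpath('//title/text()')[0])
print('post blocks on page 1:', len(selector.xpath("//div[@class='c']")))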
