# python數(shù)據(jù)分析(十一) — Python data analysis, part 11 (GroupBy / aggregation)

# -*- coding: utf-8 -*-
# Notebook-style setup: imports, a fixed RNG seed, and display options.

from __future__ import division

from numpy.random import randn

import numpy as np

import os

import matplotlib.pyplot as plt

# Fixed seed so the random examples below are reproducible.
np.random.seed(12345)

plt.rc('figure', figsize=(10, 6))

from pandas import Series, DataFrame

import pandas as pd

np.set_printoptions(precision=4)

pd.options.display.notebook_repr_html = False

# `get_ipython` only exists inside IPython/Jupyter; guard it so this file
# can also be imported or run as a plain script without a NameError.
try:
    get_ipython().magic(u'matplotlib inline')
except NameError:
    pass  # not running under IPython

### GroupBy mechanics

df = DataFrame({'key1' : ['a', 'a', 'b', 'b', 'a'],
                'key2' : ['one', 'two', 'one', 'two', 'one'],
                'data1' : np.random.randn(5),
                'data2' : np.random.randn(5)})
df

# Group a single column by the values of another column.
grouped = df['data1'].groupby(df['key1'])
grouped
grouped.mean()

# Grouping by two keys produces a hierarchically indexed result.
means = df['data1'].groupby([df['key1'], df['key2']]).mean()
means
means.unstack()

# The group keys can be any arrays of the right length, not just columns.
states = np.array(['Ohio', 'California', 'California', 'Ohio', 'Ohio'])
years = np.array([2005, 2005, 2006, 2005, 2006])
df['data1'].groupby([states, years]).mean()

# numeric_only=True: pandas >= 2.0 raises instead of silently dropping the
# non-numeric 'key2' column when aggregating the whole frame.
df.groupby('key1').mean(numeric_only=True)
df.groupby(['key1', 'key2']).mean()
df.groupby(['key1', 'key2']).size()

# ### Iterating over groups

# A GroupBy supports iteration, yielding (group name, sub-frame) pairs.
for name, group in df.groupby('key1'):
    print(name)
    print(group)

df.groupby('key1')

# With multiple keys, the "name" is a tuple of key values.
for (k1, k2), group in df.groupby(['key1', 'key2']):
    print((k1, k2))
    print(group)

# Materialize the groups as a dict of DataFrames.
pieces = dict(list(df.groupby('key1')))
pieces['b']

df.dtypes

# Group the *columns* by their dtype.
# NOTE(review): axis=1 grouping is deprecated in pandas >= 2.1 — confirm
# the target pandas version.
grouped = df.groupby(df.dtypes, axis=1)
dict(list(grouped))

# ### Selecting a column or subset of columns

# Indexing a GroupBy with a column name groups just that column;
# a list of names yields a grouped DataFrame.
df.groupby('key1')['data1']

df.groupby('key1')[['data2']]

# Equivalent long-hand forms of the two expressions above.
df['data1'].groupby(df['key1'])

df[['data2']].groupby(df['key1'])

df.groupby(['key1', 'key2'])[['data2']].mean()

s_grouped = df.groupby(['key1', 'key2'])['data2']

s_grouped

s_grouped.mean()

# ### Grouping with dicts and Series

people = DataFrame(np.random.randn(5, 5),
                   columns=['a', 'b', 'c', 'd', 'e'],
                   index=['Joe', 'Steve', 'Wes', 'Jim', 'Travis'])
# .ix was removed from pandas; use positional .iloc to add a few NA values
# (row 2 = 'Wes'; columns 1 and 2 = 'b' and 'c').
people.iloc[2:3, [1, 2]] = np.nan
people

# Map column names to group labels; unused keys (like 'f') are ignored.
mapping = {'a': 'red', 'b': 'red', 'c': 'blue',
           'd': 'blue', 'e': 'red', 'f' : 'orange'}

# NOTE(review): axis=1 grouping is deprecated in pandas >= 2.1 — confirm.
by_column = people.groupby(mapping, axis=1)
by_column.sum()

# A Series of labels works the same way as a dict.
map_series = Series(mapping)
map_series
people.groupby(map_series, axis=1).count()

# ### Grouping with functions

# A function passed as a key is called once per index value; here rows
# are grouped by the length of the person's name.
people.groupby(len).sum()

key_list = ['one', 'one', 'one', 'two', 'two']

# Functions can be mixed with arrays/dicts/Series in the key list.
people.groupby([len, key_list]).min()

# ### Grouping by index levels

columns = pd.MultiIndex.from_arrays([['US', 'US', 'US', 'JP', 'JP'],

[1, 3, 5, 1, 3]], names=['cty', 'tenor'])

hier_df = DataFrame(np.random.randn(4, 5), columns=columns)

hier_df

# Aggregate over the 'cty' level of the column MultiIndex.
# NOTE(review): axis=1 grouping is deprecated in pandas >= 2.1 — confirm.
hier_df.groupby(level='cty', axis=1).count()

# ## Data aggregation

df

grouped = df.groupby('key1')

# Quantiles work per group even though quantile is not a classic reduction.
grouped['data1'].quantile(0.9)

def peak_to_peak(arr):
    """Return the range (max - min) of an array-like/Series.

    Used below as a custom aggregation function via GroupBy.agg.
    """
    return arr.max() - arr.min()

# Custom functions go through .agg; describe() also works per group.
grouped.agg(peak_to_peak)

grouped.describe()

# ### Column-wise and multiple function application

# NOTE(review): hard-coded Windows path — adjust to the local data layout.
tips = pd.read_csv('d:/data/tips.csv')
# Tip as a percentage of the total bill.
tips['tip_pct'] = tips['tip'] / tips['total_bill']
tips[:6]

grouped = tips.groupby(['sex', 'smoker'])
grouped_pct = grouped['tip_pct']
grouped_pct.agg('mean')

# A list of functions gives one result column per function; a (name, func)
# tuple controls the resulting column name.
grouped_pct.agg(['mean', 'std', peak_to_peak])
grouped_pct.agg([('foo', 'mean'), ('bar', np.std)])

functions = ['count', 'mean', 'max']
# Selecting several columns requires a list: the tuple-style indexing
# grouped['tip_pct', 'total_bill'] was removed in pandas 2.0.
result = grouped[['tip_pct', 'total_bill']].agg(functions)
result
result['tip_pct']

ftuples = [('Durchschnitt', 'mean'), ('Abweichung', np.var)]
grouped[['tip_pct', 'total_bill']].agg(ftuples)

# A dict maps column -> function(s), allowing different aggregations
# for different columns.
grouped.agg({'tip' : np.max, 'size' : 'sum'})
grouped.agg({'tip_pct' : ['min', 'max', 'mean', 'std'],
             'size' : 'sum'})

# ## Group-wise operations and transformations

df

# numeric_only=True: pandas >= 2.0 raises when averaging the non-numeric
# 'key2' column instead of dropping it silently.
k1_means = df.groupby('key1').mean(numeric_only=True).add_prefix('mean_')
k1_means

# Broadcast the group means back onto the original rows via a merge.
pd.merge(df, k1_means, left_on='key1', right_index=True)

people

key = ['one', 'two', 'one', 'two', 'one']
people.groupby(key).mean()

# transform() returns an object the same shape as its input, with each
# group's aggregate broadcast over that group's members.
people.groupby(key).transform(np.mean)

def demean(arr):
    """Subtract the mean from an array-like/Series (center it on zero)."""
    return arr - arr.mean()

# Center each group: after the transform, every group's mean is ~0.
demeaned = people.groupby(key).transform(demean)

demeaned

demeaned.groupby(key).mean()

# ### The apply method

def top(df, n=5, column='tip_pct'):
    """Return the n rows of `df` with the largest values in `column`.

    Rows come back in ascending order of `column` (largest last),
    matching the original sort-then-tail idiom.  `sort_index(by=...)`
    was removed from pandas; `sort_values` is the modern equivalent.
    """
    return df.sort_values(by=column)[-n:]

# top() on the whole dataset, then once per smoker group via apply().
top(tips, n=6)

tips.groupby('smoker').apply(top)

# Extra positional/keyword arguments are forwarded to the function.
tips.groupby(['smoker', 'day']).apply(top, n=1, column='total_bill')

result = tips.groupby('smoker')['tip_pct'].describe()

result

# NOTE(review): assumes describe() yields a 'smoker' level to unstack —
# confirm against the installed pandas version.
result.unstack('smoker')

#f = lambda x: x.describe()

#grouped.apply(f)

# Suppressing the group keys in the result index
tips.groupby('smoker', group_keys=False).apply(top)

# ### Quantile and bucket analysis

frame = DataFrame({'data1': np.random.randn(1000),

'data2': np.random.randn(1000)})

# cut() buckets data1 into 4 equal-width intervals.
factor = pd.cut(frame.data1, 4)

factor[:10]

def get_stats(group):
    """Summary statistics for one group, as a dict of scalars.

    Returned keys: 'min', 'max', 'count', 'mean'.
    """
    return {'min': group.min(), 'max': group.max(),
            'count': group.count(), 'mean': group.mean()}

# The cut() result can itself serve as a group key.
grouped = frame.data2.groupby(factor)

# NOTE(review): older pandas turned the returned dicts into a stacked
# Series; confirm unstack() behaves the same on the installed version.
grouped.apply(get_stats).unstack()

# qcut with labels=False gives ten equal-size buckets numbered 0..9.
grouping = pd.qcut(frame.data1, 10, labels=False)

grouped = frame.data2.groupby(grouping)

grouped.apply(get_stats).unstack()

# ### Filling missing values with group-specific values

s = Series(np.random.randn(6))

# Knock out every other value.
s[::2] = np.nan

s

# Fill with the overall mean.
s.fillna(s.mean())

states = ['Ohio', 'New York', 'Vermont', 'Florida',

'Oregon', 'Nevada', 'California', 'Idaho']

# First four states are 'East', the rest 'West'.
group_key = ['East'] * 4 + ['West'] * 4

data = Series(np.random.randn(8), index=states)

data[['Vermont', 'Nevada', 'Idaho']] = np.nan

data

data.groupby(group_key).mean()

# Fill each region's NaNs with that region's own mean.
fill_mean = lambda g: g.fillna(g.mean())

data.groupby(group_key).apply(fill_mean)

# Or with a fixed per-group fill value; g.name is the group key.
fill_values = {'East': 0.5, 'West': -1}

fill_func = lambda g: g.fillna(fill_values[g.name])

data.groupby(group_key).apply(fill_func)

# ### Random sampling and permutation

# Build a standard 52-card deck: face value per card (A=1, J/Q/K=10).
suits = ['H', 'S', 'C', 'D']
# list(...) is required on Python 3, where range() is lazy and cannot be
# concatenated with a list directly.
card_val = (list(range(1, 11)) + [10] * 3) * 4
base_names = ['A'] + list(range(2, 11)) + ['J', 'K', 'Q']
cards = []
for suit in ['H', 'S', 'C', 'D']:
    cards.extend(str(num) + suit for num in base_names)

deck = Series(card_val, index=cards)
deck[:13]

def draw(deck, n=5):
    """Draw n cards from `deck` uniformly at random, without replacement."""
    return deck.take(np.random.permutation(len(deck))[:n])

draw(deck)

# The suit is the last character of each card name.
get_suit = lambda card: card[-1]
deck.groupby(get_suit).apply(draw, n=2)

# Same draw, without the group keys in the result index.
deck.groupby(get_suit, group_keys=False).apply(draw, n=2)

# ### Group weighted average and correlation

df = DataFrame({'category': ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'],

'data': np.random.randn(8),

'weights': np.random.rand(8)})

df

grouped = df.groupby('category')

# Weighted average of 'data' within each category, using 'weights'.
get_wavg = lambda g: np.average(g['data'], weights=g['weights'])

grouped.apply(get_wavg)

# NOTE(review): hard-coded Windows path — adjust to the local data layout.
close_px = pd.read_csv('d:/data/stock_px.csv', parse_dates=True, index_col=0)

close_px.info()

close_px[-4:]

# Daily percentage returns.
rets = close_px.pct_change().dropna()

# Correlation of every column with the 'SPX' (S&P 500) column.
spx_corr = lambda x: x.corrwith(x['SPX'])

# Group rows by the calendar year of the index.
by_year = rets.groupby(lambda x: x.year)

by_year.apply(spx_corr)

# Annual correlation between Apple and Microsoft
by_year.apply(lambda g: g['AAPL'].corr(g['MSFT']))

# ## Pivot tables

# Default aggregation is the mean of all numeric columns.
# NOTE(review): pandas >= 2.0 may raise on non-numeric columns here —
# confirm against the installed version.
tips.pivot_table(index=['sex', 'smoker'])

tips.pivot_table(['tip_pct', 'size'], index=['sex', 'day'],

columns='smoker')

# margins=True adds 'All' row/column subtotals.
tips.pivot_table(['tip_pct', 'size'], index=['sex', 'day'],

columns='smoker', margins=True)

# aggfunc=len yields a frequency table.
tips.pivot_table('tip_pct', index=['sex', 'smoker'], columns='day',

aggfunc=len, margins=True)

# fill_value replaces missing group combinations with 0.
tips.pivot_table('size', index=['time', 'sex', 'smoker'],

columns='day', aggfunc='sum', fill_value=0)

# ### Cross-tabulations

# Python 3: StringIO lives in the io module (the Python 2 `StringIO`
# module no longer exists).
from io import StringIO

# The field separators were garbled into '?' characters in the scraped
# original; restored to plain whitespace so the sep=r'\s+' split below
# parses three clean columns.
data = """Sample    Gender    Handedness
1    Female    Right-handed
2    Male    Left-handed
3    Female    Right-handed
4    Male    Right-handed
5    Male    Left-handed
6    Male    Right-handed
7    Female    Right-handed
8    Female    Left-handed
9    Male    Right-handed
10    Female    Right-handed"""

# Raw string avoids the invalid-escape warning for '\s' on Python 3.
data = pd.read_table(StringIO(data), sep=r'\s+')
data

# Frequency table of gender vs. handedness, with 'All' subtotals.
pd.crosstab(data.Gender, data.Handedness, margins=True)

# Cross-tab with two row keys (time, day) against smoker, with subtotals.
pd.crosstab([tips.time, tips.day], tips.smoker, margins=True)

# ## 2012 Federal Election Commission data analysis

# NOTE(review): hard-coded Windows path — adjust to the local data layout.
fec = pd.read_csv('d:/data/P00000001-ALL.csv')
fec.info()

# .ix was removed from pandas; iloc gives the same positional row lookup.
fec.iloc[123456]

unique_cands = fec.cand_nm.unique()
unique_cands
unique_cands[2]

# Candidate name -> party affiliation.
parties = {'Bachmann, Michelle': 'Republican',
           'Cain, Herman': 'Republican',
           'Gingrich, Newt': 'Republican',
           'Huntsman, Jon': 'Republican',
           'Johnson, Gary Earl': 'Republican',
           'McCotter, Thaddeus G': 'Republican',
           'Obama, Barack': 'Democrat',
           'Paul, Ron': 'Republican',
           'Pawlenty, Timothy': 'Republican',
           'Perry, Rick': 'Republican',
           "Roemer, Charles E. 'Buddy' III": 'Republican',
           'Romney, Mitt': 'Republican',
           'Santorum, Rick': 'Republican'}

fec.cand_nm[123456:123461]
fec.cand_nm[123456:123461].map(parties)

# Add a party column derived from the candidate name.
fec['party'] = fec.cand_nm.map(parties)
fec['party'].value_counts()

# Restrict to positive contribution amounts (the file also has refunds).
(fec.contb_receipt_amt > 0).value_counts()
fec = fec[fec.contb_receipt_amt > 0]

# Subset for the two main candidates.
fec_mrbo = fec[fec.cand_nm.isin(['Obama, Barack', 'Romney, Mitt'])]

# # Donation statistics by occupation and employer

fec.contbr_occupation.value_counts()[:10]

# Collapse several spellings of "not provided" and normalize C.E.O. -> CEO.
occ_mapping = {

'INFORMATION REQUESTED PER BEST EFFORTS' : 'NOT PROVIDED',

'INFORMATION REQUESTED' : 'NOT PROVIDED',

'INFORMATION REQUESTED (BEST EFFORTS)' : 'NOT PROVIDED',

'C.E.O.': 'CEO'

}

# If no mapping provided, return x
f = lambda x: occ_mapping.get(x, x)

fec.contbr_occupation = fec.contbr_occupation.map(f)

# Same normalization idea for employer names.
emp_mapping = {

'INFORMATION REQUESTED PER BEST EFFORTS' : 'NOT PROVIDED',

'INFORMATION REQUESTED' : 'NOT PROVIDED',

'SELF' : 'SELF-EMPLOYED',

'SELF EMPLOYED' : 'SELF-EMPLOYED',

}

# If no mapping provided, return x
f = lambda x: emp_mapping.get(x, x)

fec.contbr_employer = fec.contbr_employer.map(f)

# Total donation amounts by occupation and party.
by_occupation = fec.pivot_table('contb_receipt_amt',

index='contbr_occupation',

columns='party', aggfunc='sum')

# Keep only occupations that donated more than $2 million overall.
over_2mm = by_occupation[by_occupation.sum(1) > 2000000]

over_2mm

over_2mm.plot(kind='barh')

def get_top_amounts(group, key, n=5):
    """Return the n largest total contribution amounts within `group`,
    summed per value of the `key` column, in descending order.

    `Series.order()` was removed from pandas; `sort_values()` is the
    modern equivalent.  The original took the *tail* of the descending
    sort ([-n:]), which actually yields the smallest n totals —
    contradicting the function name and the comment — so the head
    ([:n]) is taken instead.
    """
    totals = group.groupby(key)['contb_receipt_amt'].sum()
    # Order totals in descending order and keep the n largest.
    return totals.sort_values(ascending=False)[:n]

grouped = fec_mrbo.groupby('cand_nm')

# Top occupations / employers by total amount, for each candidate.
grouped.apply(get_top_amounts, 'contbr_occupation', n=7)

grouped.apply(get_top_amounts, 'contbr_employer', n=10)

# # Bucketing contribution sizes

# Logarithmically spaced bucket edges from $0 up to $10M.
bins = np.array([0, 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000])

labels = pd.cut(fec_mrbo.contb_receipt_amt, bins)

labels

# Group by candidate and amount bucket simultaneously.
grouped = fec_mrbo.groupby(['cand_nm', labels])

grouped.size().unstack(0)

bucket_sums = grouped.contb_receipt_amt.sum().unstack(0)

bucket_sums

# Normalize each bucket row so the two candidates' shares sum to 1.
normed_sums = bucket_sums.div(bucket_sums.sum(axis=1), axis=0)

normed_sums

# Plot all but the two largest buckets.
normed_sums[:-2].plot(kind='barh', stacked=True)

# # Donation statistics by state

grouped = fec_mrbo.groupby(['cand_nm', 'contbr_st'])

totals = grouped.contb_receipt_amt.sum().unstack(0).fillna(0)

# Keep only states with more than $100k in total donations.
totals = totals[totals.sum(1) > 100000]

totals[:10]

# Each candidate's share of each state's total.
percent = totals.div(totals.sum(1), axis=0)

percent[:10]

最后編輯于
?著作權(quán)歸作者所有,轉(zhuǎn)載或內(nèi)容合作請(qǐng)聯(lián)系作者
  • 序言:七十年代末,一起剝皮案震驚了整個(gè)濱河市辛友,隨后出現(xiàn)的幾起案子薄扁,更是在濱河造成了極大的恐慌,老刑警劉巖废累,帶你破解...
    沈念sama閱讀 218,858評(píng)論 6 508
  • 序言:濱河連續(xù)發(fā)生了三起死亡事件泌辫,死亡現(xiàn)場(chǎng)離奇詭異,居然都是意外死亡九默,警方通過查閱死者的電腦和手機(jī),發(fā)現(xiàn)死者居然都...
    沈念sama閱讀 93,372評(píng)論 3 395
  • 文/潘曉璐 我一進(jìn)店門宾毒,熙熙樓的掌柜王于貴愁眉苦臉地迎上來驼修,“玉大人,你說我怎么就攤上這事诈铛∫腋鳎” “怎么了?”我有些...
    開封第一講書人閱讀 165,282評(píng)論 0 356
  • 文/不壞的土叔 我叫張陵幢竹,是天一觀的道長(zhǎng)耳峦。 經(jīng)常有香客問我,道長(zhǎng)焕毫,這世上最難降的妖魔是什么蹲坷? 我笑而不...
    開封第一講書人閱讀 58,842評(píng)論 1 295
  • 正文 為了忘掉前任驶乾,我火速辦了婚禮,結(jié)果婚禮上循签,老公的妹妹穿的比我還像新娘级乐。我一直安慰自己,他們只是感情好县匠,可當(dāng)我...
    茶點(diǎn)故事閱讀 67,857評(píng)論 6 392
  • 文/花漫 我一把揭開白布风科。 她就那樣靜靜地躺著,像睡著了一般乞旦。 火紅的嫁衣襯著肌膚如雪贼穆。 梳的紋絲不亂的頭發(fā)上,一...
    開封第一講書人閱讀 51,679評(píng)論 1 305
  • 那天兰粉,我揣著相機(jī)與錄音故痊,去河邊找鬼。 笑死亲桦,一個(gè)胖子當(dāng)著我的面吹牛崖蜜,可吹牛的內(nèi)容都是我干的。 我是一名探鬼主播客峭,決...
    沈念sama閱讀 40,406評(píng)論 3 418
  • 文/蒼蘭香墨 我猛地睜開眼豫领,長(zhǎng)吁一口氣:“原來是場(chǎng)噩夢(mèng)啊……” “哼!你這毒婦竟也來了舔琅?” 一聲冷哼從身側(cè)響起等恐,我...
    開封第一講書人閱讀 39,311評(píng)論 0 276
  • 序言:老撾萬榮一對(duì)情侶失蹤,失蹤者是張志新(化名)和其女友劉穎备蚓,沒想到半個(gè)月后课蔬,有當(dāng)?shù)厝嗽跇淞掷锇l(fā)現(xiàn)了一具尸體,經(jīng)...
    沈念sama閱讀 45,767評(píng)論 1 315
  • 正文 獨(dú)居荒郊野嶺守林人離奇死亡郊尝,尸身上長(zhǎng)有42處帶血的膿包…… 初始之章·張勛 以下內(nèi)容為張勛視角 年9月15日...
    茶點(diǎn)故事閱讀 37,945評(píng)論 3 336
  • 正文 我和宋清朗相戀三年二跋,在試婚紗的時(shí)候發(fā)現(xiàn)自己被綠了。 大學(xué)時(shí)的朋友給我發(fā)了我未婚夫和他白月光在一起吃飯的照片流昏。...
    茶點(diǎn)故事閱讀 40,090評(píng)論 1 350
  • 序言:一個(gè)原本活蹦亂跳的男人離奇死亡扎即,死狀恐怖,靈堂內(nèi)的尸體忽然破棺而出况凉,到底是詐尸還是另有隱情谚鄙,我是刑警寧澤更哄,帶...
    沈念sama閱讀 35,785評(píng)論 5 346
  • 正文 年R本政府宣布葛账,位于F島的核電站,受9級(jí)特大地震影響续担,放射性物質(zhì)發(fā)生泄漏知市。R本人自食惡果不足惜傻盟,卻給世界環(huán)境...
    茶點(diǎn)故事閱讀 41,420評(píng)論 3 331
  • 文/蒙蒙 一速蕊、第九天 我趴在偏房一處隱蔽的房頂上張望。 院中可真熱鬧莫杈,春花似錦互例、人聲如沸。這莊子的主人今日做“春日...
    開封第一講書人閱讀 31,988評(píng)論 0 22
  • 文/蒼蘭香墨 我抬頭看了看天上的太陽。三九已至关顷,卻和暖如春糊秆,著一層夾襖步出監(jiān)牢的瞬間,已是汗流浹背议双。 一陣腳步聲響...
    開封第一講書人閱讀 33,101評(píng)論 1 271
  • 我被黑心中介騙來泰國(guó)打工痘番, 沒想到剛下飛機(jī)就差點(diǎn)兒被人妖公主榨干…… 1. 我叫王不留,地道東北人平痰。 一個(gè)月前我還...
    沈念sama閱讀 48,298評(píng)論 3 372
  • 正文 我出身青樓汞舱,卻偏偏與公主長(zhǎng)得像,于是被迫代替她去往敵國(guó)和親宗雇。 傳聞我的和親對(duì)象是個(gè)殘疾皇子昂芜,可洞房花燭夜當(dāng)晚...
    茶點(diǎn)故事閱讀 45,033評(píng)論 2 355

推薦閱讀更多精彩內(nèi)容