#-*-coding:utf-8-*-
import csv
import os
#向固定的csv文件(1111.csv)追加一行
#將words寫(xiě)入1111.csv文件最后一行,文件打開(kāi)采用'a'模式,即在原文件后追加(append)
def writeByDate(words, file_name="1111.csv", data_dir='../data/'):
    """Append *words* as one CSV row to data_dir/file_name.

    Args:
        words: sequence of values written as a single CSV row.
        file_name: target CSV file name; defaults to the original
            hard-coded "1111.csv".
        data_dir: directory holding the file; defaults to the original
            '../data/'.

    The file is opened in append ('a') mode, so repeated calls accumulate
    rows.  The original version called os.chdir('../data/') on every
    invocation, which permanently relocated the process working directory
    and made a second call resolve the path wrongly; the path is now
    joined explicitly and the handle closed via a context manager.
    """
    with open(os.path.join(data_dir, file_name), 'a', newline='') as f:
        csv.writer(f).writerow(words)
#主函數(shù)
def splitByDate(csv_path="../data/yu.csv"):
    """Read a CSV of integers and print every value smaller than 10.

    Args:
        csv_path: input CSV path; defaults to the original hard-coded
            '../data/yu.csv'.

    Each cell is converted with int(), so a non-numeric cell raises
    ValueError exactly as the original did.  The file handle is now
    closed via a context manager (the original never closed it), and the
    lost loop indentation of the pasted snippet is restored.
    """
    with open(csv_path) as f:
        for row in csv.reader(f):
            for cell in row:
                value = int(cell)
                if value < 10:
                    print(value)
splitByDate()
#-*-coding:utf-8-*-
"""
將tianchi_mobile_recommend_train_user.csv按照日期分割為31份**.csv文件,放在'/data/date/'目錄下。
生成的**.csv文件內(nèi)容格式如下:
user_id, item_id, behavior_type,user_geohash,item_category, hour
99512554,37320317, 3, 94gn6nd, 9232, 20
"""
import csv
import os
#記錄已存在的date.csv
# Tracks which per-date CSV files have already been written, so the header
# row is emitted only on the first write for each date.
date_dictionary = {}

# Append one record to the per-date CSV, creating the header on first use.
def writeByDate(date, words, out_dir='../data/date/'):
    """Append *words* as one CSV row to out_dir/<date>.csv.

    Args:
        date: date string (e.g. '2014-12-01'); used as the file name stem.
        words: one record; the caller passes
            [user_id, item_id, behavior_type, user_geohash, item_category, hour].
        out_dir: output directory; defaults to the original hard-coded
            '../data/date/'.

    The first write for a given date emits a header row before the record.
    The original version os.chdir()'d into the directory and back via
    '../../data/', which only resolved correctly from one specific starting
    directory and silently moved the process cwd; paths are now joined
    explicitly, and the duplicated writer code of the two branches is merged.
    """
    file_path = os.path.join(out_dir, date + ".csv")
    first_time = date not in date_dictionary
    date_dictionary[date] = True
    with open(file_path, 'a', newline='') as f:
        writer = csv.writer(f)
        if first_time:
            writer.writerow(['user_id', 'item_id', 'behavior_type',
                             'user_geohash', 'item_category', 'hour'])
        writer.writerow(words)
#主函數(shù)
# Main routine: split the raw action log into one CSV per date.
def splitByDate(src="../data/tianchi_fresh_comp_train_user.csv"):
    """Split the raw user-action CSV into per-date files under ../data/date/.

    Args:
        src: input CSV whose last column is a 'YYYY-MM-DD HH' timestamp;
            defaults to the original hard-coded path.

    For every data row the timestamp is split into date and hour; the date
    selects the output file (via writeByDate, which adds the header on
    first write) and the hour replaces the timestamp column.  Changes vs.
    the original: os.makedirs(..., exist_ok=True) instead of os.mkdir()
    (which raised if the directory already existed) and the input file is
    closed via a context manager.
    """
    os.makedirs('../data/date/', exist_ok=True)
    with open(src) as f:
        rows = csv.reader(f)
        next(rows)  # skip the input header row
        for row in rows:
            parts = row[-1].split(" ")
            date, hour = parts[0], parts[1]
            words = row[0:-1]
            words.append(hour)
            writeByDate(date, words)
splitByDate()
#-*-coding:utf-8-*-
"""
對(duì)于某一天的數(shù)據(jù)按四種操作切割:瀏覽,收藏瞳氓,加入購(gòu)物車(chē)策彤,購(gòu)買(mǎi)
"""
import csv
import os
#記錄已存在的behavior.csv
# Tracks which per-behavior CSV files have already been written, so the
# header row is emitted only on the first write for each behavior type.
behavior_dictionary = {}

# Append one record to the per-behavior CSV, creating the header on first use.
def writeByDate(behavior, words, out_dir='../data/behaviour_type/'):
    """Append *words* as one CSV row to out_dir/<behavior>.csv.

    Args:
        behavior: behavior_type code used as the file name stem (per the
            note above: browse / collect / add-to-cart / buy).
        words: one record matching the header
            [user_id, item_id, behavior_type, user_geohash, item_category, date].
        out_dir: output directory; defaults to the original hard-coded
            '../data/behaviour_type/'.

    Changes vs. the original: the os.chdir() in/out dance (which only
    worked from one specific starting directory and moved the process cwd)
    is replaced by explicit path joining; the duplicated writer code of the
    two branches is merged; and newline='' is passed to open() for
    consistency with the other writers in this file (without it the csv
    module produces blank lines on Windows).
    """
    file_path = os.path.join(out_dir, behavior + ".csv")
    first_time = behavior not in behavior_dictionary
    behavior_dictionary[behavior] = True
    with open(file_path, 'a', newline='') as f:
        writer = csv.writer(f)
        if first_time:
            writer.writerow(['user_id', 'item_id', 'behavior_type',
                             'user_geohash', 'item_category', 'date'])
        writer.writerow(words)
#主函數(shù)
# Main routine: split one day's records by behavior type.
def splitByDate(src="../data/2014-12-18.csv"):
    """Split one day's action records into one CSV per behavior type.

    Args:
        src: per-date CSV produced by the date-split step; column index 2
            is behavior_type, and the last column is dropped with the
            behavior code appended in its place.  Defaults to the original
            hard-coded path.

    NOTE(review): files produced by the date-split step begin with a header
    row and no header is skipped here, so that row would be routed to a
    'behavior_type.csv' file -- confirm whether that is intended; behavior
    is kept unchanged.  Changes vs. the original: os.makedirs(...,
    exist_ok=True) instead of os.mkdir() (which raised if the directory
    already existed) and the input file is closed via a context manager.
    """
    os.makedirs('../data/behaviour_type/', exist_ok=True)
    with open(src) as f:
        for row in csv.reader(f):
            behavior = row[2]
            words = row[0:-1]
            words.append(behavior)
            writeByDate(behavior, words)
splitByDate()
文章來(lái)自于[Merge CSV Files Into One Large CSV File In Windows 7](http://www.solveyourtech.com/merge-csv-files/)店诗。經(jīng)[測(cè)試](http://lib.csdn.net/base/softwaretest),win7以上版本的windows都可以音榜。步驟如下:
將所有的csv文件放到一個(gè)文件夾必搞,位置任意。
打開(kāi)cmd囊咏,切換到存放csv的文件夾恕洲,也可以在csv文件夾中,按住shift加鼠標(biāo)右鍵梅割,選擇在此處打開(kāi)命令窗口霜第。
輸入**copy *.csv all-groups.csv**,all-group的名字户辞,可以任意泌类。然后按enter,等待完成就可以了底燎。
打開(kāi)csv文件夾就可以看到all-group.csv
excel不適合操作大量的數(shù)據(jù)刃榨,建議將csv導(dǎo)入到[數(shù)據(jù)庫(kù)](http://lib.csdn.net/base/mysql)中去。
怎么讀取超大數(shù)據(jù)的csv文件呢双仍?
注意:csv文件默認(rèn)格式是gbk而不是utf-8.
# Read a gbk-encoded CSV with pandas (see the encoding note above).
import pandas as pd
df = pd.read_csv('JData_User.csv',encoding='gbk')
print(df)
pandas讀取超大數(shù)據(jù)文件
# Filter JData action rows by type, join product info on sku_id, and export
# as utf-8.  (Type-code meanings are inferred from the output file name
# 'pay_or_cancel_4.csv' -- verify against the dataset documentation.)
import pandas as pd
df = pd.read_csv('JData_Action_201604.csv',encoding='gbk')
df2 = pd.read_csv('JData_Product.csv',encoding='gbk')
#==============================================================================
# df = df[df.type==2]
#==============================================================================
# keep only rows whose type is 3 or 4
df = df[(df.type==3)|(df.type==4)]
#==============================================================================
# df.to_csv('wantpay_4.csv',encoding='utf-8')
#==============================================================================
# join on sku_id, keeping rows present in both frames
result1 = pd.merge(df, df2, on=['sku_id'])
print(result1)
result1.to_csv('pay_or_cancel_4.csv',encoding='utf-8')
#==============================================================================
# result1 = pd.merge(df, df2, on=['sku_id'])
# print(result1)
# result1.to_csv('wantpay_product.csv',encoding='utf-8')
#==============================================================================
進(jìn)行時(shí)間格式轉(zhuǎn)換
# Parse a 'd/m/Y H:M:S' timestamp string and re-emit it as YYYYmmddHHMMSS.
import time
import datetime

time_original = '17/9/2012 11:40:00'
# keep just the date portion of the raw string ('17/9/2012')
day = time_original.partition(' ')[0]
# strptime yields a datetime object (printable on its own);
# strftime then compacts it into digits only
parsed = datetime.datetime.strptime(time_original, '%d/%m/%Y %H:%M:%S')
time_format = parsed.strftime('%Y%m%d%H%M%S')
print(time_format)
pandas中的刪除重復(fù)數(shù)據(jù)duplicates的用法
# Drop every (user_id, sku_id) pair that occurs more than once:
# keep=False removes *all* members of a duplicate group, not just the extras.
# (The time/datetime imports are unused in this snippet.)
import pandas as pd
import time
import datetime
df = pd.read_csv('all.csv',encoding='gbk')
df = df.drop_duplicates(['user_id','sku_id'], keep=False)
df.to_csv('will_pay_4.csv',encoding='utf-8')
print(df)
import pandas as pd
# Slice rows by date: keep [2016-03-15, 2016-04-16).  String comparison is
# enough here because the dates are in ISO 'YYYY-MM-DD...' form.
actions = pd.read_csv("2m.csv",encoding='gbk')
actions = actions[(actions.time >= '2016-03-15') & (actions.time < '2016-04-16')]
actions.to_csv('between_days.csv',encoding='utf-8')
將list寫(xiě)入txt:
# Write each element of `lists` (defined elsewhere in these notes) to
# list.txt, one per line.  Fixed vs. the original snippet: the loop body
# had lost its indentation (a syntax error as pasted), and the file is now
# closed via a context manager even if a write fails.
with open('list.txt', 'w') as fl:
    for i in lists:
        fl.write(i)
        fl.write("\n")
# Per-group aggregation recipes (df and the column names are placeholders
# carried over from the notes; this run is not standalone-executable).
df = df.groupby(['user_id'], as_index=False).sum()# column sums per user_id (note: totals of the matching ['area'])
df =df.groupby(['area'], as_index=False).size()# row count of each ['area'] group
df =df.groupby(['area'], as_index=False).mean()# column means of each ['area'] group
import pandas as pd
df = pd.read_csv('submission.csv')
# rank each 'lab' value within its user_id group
df['sort_id'] = df['lab'].groupby(df['user_id']).rank()
print(df)
求日期的前一天
from datetime import timedelta, datetime
# Day before end_date (end_date is a 'YYYY-MM-DD' string defined elsewhere
# in the notes -- this line raises NameError if run standalone).
yesterday = (datetime.strptime(str(end_date), '%Y-%m-%d') - timedelta(days=1)).strftime('%Y-%m-%d')
print(yesterday)
# number of whole days between two dates (here: 5)
a = (datetime.strptime(str('2016-04-06'), '%Y-%m-%d') - datetime.strptime(str('2016-04-01'), '%Y-%m-%d')).days
將任意格式的時(shí)間轉(zhuǎn)化為指定日期
results = pd.read_csv("nba_data.csv", parse_dates=["Date"])
將以秒計(jì)數(shù)的時(shí)間(Unix時(shí)間戳,unit='s')轉(zhuǎn)換為正常YMDHMS格式
all_ratings["Datetime"] = pd.to_datetime(all_ratings['Datetime'],unit='s')
簡(jiǎn)單小tips
# str.format substitutes its arguments into the {} placeholders in order.
print("There are {} movies with more than {} favorable reviews".format(3,4))
"""
There are 3 movies with more than 4 favorable reviews
"""
# Drop rows whose 'user' value is 0, 1 or 2 (isin(range(3)); ~ negates the mask).
df= df[~df['user'].isin(range(3))]
# .describe() summarises the column (count/mean/std/min/quartiles/max),
# as in the sample output below.
adult["Hours-per-week"].describe()
"""
count 19115.000000
mean 40.393408
std 12.253508
min 1.000000
25% 40.000000
50% 40.000000
75% 45.000000
max 99.000000
Name: Hours-per-week, dtype: float64
"""
怎么求pandas中的間隔日期呢?
def get_gap_days(csv_path='../data/train.csv'):
    """Add a 'days' column derived from the 'clickTime' column.

    Args:
        csv_path: CSV with a 'clickTime' column in '%d%H%M' form
            (day-of-month, hour, minute); defaults to the original
            hard-coded path.

    Returns:
        The DataFrame with added 'time' (datetime) and 'days' (int32)
        columns.  The frame is also printed, matching the original.

    Since '%d%H%M' carries no year/month, parsed times fall in 1900-01;
    subtracting 1899-12-31 00:00 therefore makes 'days' the floor of the
    elapsed 24-hour periods (i.e. the day-of-month).  Fixes vs. the
    original snippet: the body had lost its indentation, numpy was used as
    `np` without ever being imported in this file, and clickTime is cast
    to str so the format parse also works when read_csv infers an integer
    column.
    """
    import pandas as pd
    import numpy as np

    act_data = pd.read_csv(csv_path)
    act_data['time'] = pd.to_datetime(act_data['clickTime'].astype(str),
                                      format='%d%H%M')
    epoch = pd.to_datetime(pd.Series(['1899-12-31 00:00:00']))
    act_data['days'] = (act_data['time'] - epoch.iat[0]).dropna()
    act_data['days'] = np.int32(act_data['days'] / np.timedelta64(24, 'h'))
    print(act_data)
    return act_data