import pandas as pd
import csv
"""
demo1
"""
# train_data1 = pd.read_csv("train_ori", sep="\t", quoting=csv.QUOTE_NONE)  # use this when pandas fails to parse rows containing certain Chinese characters
# Step 1: inspect the data before sampling (label distribution prior to iteration 3)
# before_add = pd.read_csv("111", sep="\t")
# print("# 迭代序號3之前數(shù)據(jù)分布")
# print(before_add['label'].value_counts())
# new_data = before_add[:12865]
# new_data.to_csv("./222", sep="\t", columns=None, index=None)
# print(new_data['label'].value_counts())
# Step 2: inspect the sampled data (distribution of the 1,000 rows for iteration 3)
# test_2000 = pd.read_csv("test_2000", sep="\t")
# print("# 針對迭代序號3,這1000條數(shù)據(jù)分布")
# print(test_2000['label'].value_counts()) # light 131 # not_clickbait 869
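# A minimal, self-contained sketch of the same value_counts check on a toy
# DataFrame (synthetic labels, not the project's files):
# toy = pd.DataFrame({"label": ["light", "not_clickbait", "not_clickbait", "light", "serious"]})
# print(toy["label"].value_counts())  # per-label counts, descending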
"""
demo2
"""
# Build a new prediction set: pick 100 rows, concatenate, and write to a file
# title_data = pd.read_csv("V2.test.content", sep="\t")
# S_data_p = title_data[title_data["label"] == "serious"]
# L_data_p = title_data[title_data["label"] == "light"]
# SL_data_p = pd.concat([S_data_p, L_data_p])[:50]
# N_data_p = title_data[title_data["label"] == "not_clickbait"][100:150]
# p_data = pd.concat([SL_data_p, N_data_p])
# p_data.to_csv("./predict_new.csv", sep="\t", index=None)
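# A minimal sketch of the filter-then-concat pattern above, on synthetic rows
# (column names mirror the demo, the data is made up):
# toy = pd.DataFrame({"title": list("abcdef"),
#                     "label": ["serious", "light", "not_clickbait"] * 2})
# sl = pd.concat([toy[toy["label"] == "serious"], toy[toy["label"] == "light"]])
# n = toy[toy["label"] == "not_clickbait"][:1]
# print(pd.concat([sl, n]))  # serious + light rows first, then one not_clickbait row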
"""
demo3
"""
# Step 1: read the two input files
# n1 = pd.read_csv("E:\騰訊項(xiàng)目\數(shù)據(jù)分析\label\label_from_mario_202010261.txt", sep="\t")
# n2 = pd.read_csv("E:\騰訊項(xiàng)目\數(shù)據(jù)分析\label\label_from_mario_20201027.txt", sep="\t")
#
# L_data = n1[n1["label"] == "light"]
# N_data = n1[n1["label"] == "not_clickbait"]
# print(L_data['label'].value_counts())
# print(N_data['label'].value_counts())
# 步驟2脉课,更換列的順序
# n1_pre_data = pd.concat([L_data, N_data])
# n1_pre_data = n1_pre_data[["cmsid", "label", "media_id", "title", "content", "entropy"]]
# L2_data = n2[n2["label"] == "light"]
# N2_data = n2[n2["label"] == "not_clickbait"]
# N2_data_ = pd.concat([L2_data, N2_data])
# N2_data_ = N2_data_[["cmsid", "label", "media_id", "title", "content", "entropy"]]
# N3_data_ = pd.concat([n1_pre_data, N2_data_])
# print(N3_data_['label'].value_counts())
# print(N3_data_.columns)
# # Step 3: drop the 'entropy' column
# n1_data = N3_data_.drop(["entropy"], axis=1)
# L_pre_data = n1_data[n1_data["label"] == "light"][:272]
# N_pre_data = n1_data[n1_data["label"] == "not_clickbait"][:728]
# n1_pre_data = pd.concat([L_pre_data, N_pre_data])
# n1_pre_data.to_csv("./pre_data.csv", sep="\t", index=None)
#
# L_tr_data = n1_data[n1_data["label"] == "light"][272:]
# N_tr_data = n1_data[n1_data["label"] == "not_clickbait"][728:2344]
# n1_tr_data = pd.concat([L_tr_data, N_tr_data])
# n1_tr_data.to_csv("./tr_data.csv", sep="\t", index=None)
#
# # Step 4: merge the datasets a second time
# LX = pd.read_csv("E:\騰訊項(xiàng)目\數(shù)據(jù)分析\lx", sep="\t")
# S_data = LX[LX["label"] == "serious"]
# L_data = LX[LX["label"] == "light"]
# N_data = LX[LX["label"] == "not_clickbait"]
# # print(LX['label'].value_counts())
# n2_tr_data = pd.concat([LX, n1_tr_data])
# n2_tr_data.to_csv("./all_data.csv", sep="\t", index=None)
# print("=====================================")
# print(n2_tr_data['label'].value_counts())
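# A minimal sketch of demo3's core moves (reorder columns, drop one, split by
# row slices) on a synthetic frame; column names mirror the demo, data is toy:
# toy = pd.DataFrame({"cmsid": range(4), "label": ["light"] * 4,
#                     "title": list("wxyz"), "entropy": [0.1, 0.2, 0.3, 0.4]})
# toy = toy[["cmsid", "label", "entropy", "title"]]   # reorder columns
# toy = toy.drop(["entropy"], axis=1)                 # drop a column
# head, tail = toy[:2], toy[2:]                       # slice-split into two sets
# print(head.shape, tail.shape)  # (2, 3) (2, 3)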
#
""""
demo4
"""
# # Deduplicate
# tr_data = pd.read_csv("tr_data.csv", sep="\t")
# print(tr_data.columns)
# tr_data.drop_duplicates(subset=['cmsid'],keep='first',inplace=True)
# print(tr_data["label"].value_counts())
# print("=====================================")
# all_data = pd.read_csv("all_data.csv", sep="\t")
# print(all_data["label"].value_counts())
# k = all_data.drop_duplicates(subset=['cmsid'],keep='first')
# print("======================================")
# print(k["label"].value_counts())
""""
demo5
"""
# Drop rows containing missing values
# tr_data = pd.read_csv("train_4", sep="\t")
# tr_data1 = tr_data.dropna()
# tr_data1.to_csv("train_04", sep="\t", index=None)
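# A minimal dropna sketch (toy frame with one missing cell):
# toy = pd.DataFrame({"a": [1, None, 3], "b": ["x", "y", "z"]})
# print(toy.dropna())  # the row with NaN in column 'a' is removed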
""""
demo6
"""
# t = open("V2.test.content", encoding="utf-8")
# g = open("V1", encoding="utf-8")
# tr_data_ = t.readlines()
# gr_data_ = g.readlines()
# print(len(tr_data_))
# print(type(tr_data_))
# with open("./dt", "w", encoding='utf-8') as f:
# for i in range(len(tr_data_)):
# if i == 0:
# f.write(tr_data_[0])
# continue
# k_ = tr_data_[i*4-3:i*4+1]
# g_ = gr_data_[i*4-3:i*4+1]
# # if i == 5:
# # print(k_)
# k = "".join(k_)
# g = "".join(g_)
# f.write(k)
# f.write(g)
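# A minimal, self-contained sketch of the same interleave logic on synthetic
# line lists (stand-ins for the two files above):
# t_lines = ["header\n"] + [f"t{j}\n" for j in range(1, 9)]  # 2 blocks of 4
# g_lines = ["header\n"] + [f"g{j}\n" for j in range(1, 9)]
# out = [t_lines[0]]
# for i in range(1, (len(t_lines) - 1) // 4 + 1):
#     out += t_lines[i*4-3:i*4+1] + g_lines[i*4-3:i*4+1]
# print("".join(out))  # header, then t1-t4, g1-g4, t5-t8, g5-g8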
"""demo7 打印正負(fù)標(biāo)簽比例
"""
# import pandas as pd
# from collections import Counter
#
# train_data = pd.read_csv("V2.test.content", sep="\t")
#
# print(train_data[:5])
# # v = Counter(train_data['label'].values)
# # print(type(v))
# # Print the positive/negative label ratio
# print(dict(Counter(train_data['label'].values)))
# train_data = train_data.values.tolist()
# print(train_data[:10])
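# A minimal sketch contrasting Counter with value_counts on toy labels:
# from collections import Counter
# toy = pd.DataFrame({"label": ["pos", "neg", "pos", "pos"]})
# print(dict(Counter(toy["label"].values)))  # {'pos': 3, 'neg': 1}
# print(toy["label"].value_counts())         # same counts, as a Series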
"""demo8 iloc
"""
# import pandas as pd
# c = [2, 5, 7]
# df = pd.read_csv("V1", sep="\t")
# df = df.iloc[c]  # select rows by integer position; same as df.iloc[[2, 5, 7]]
# df.to_csv("0100",index=None,sep="\t")
"""demo9 loc
"""
# import pandas as pd
# df = pd.read_csv("robot_sql_result_20210108210552.txt", sep="\t")
# print(df.columns)
# dk = df.loc[:,"info"]
# dk.to_csv("robot_sql", index=None, sep="\t")
# print(dk.head())
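# A minimal, self-contained sketch contrasting iloc (integer positions) with
# loc (labels), on a toy frame:
# toy = pd.DataFrame({"info": list("abcde"), "score": range(5)})
# print(toy.iloc[[2, 4]])    # rows at positions 2 and 4
# print(toy.loc[:, "info"])  # the 'info' column, all rows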
"""demo10
"""
# import json
# f = open("robot_sql_result_20210108210552.txt",encoding='utf-8')
# g = open("robot_sql_r.txt", "w", encoding='utf-8')
# data = f.readlines()
# for i in range(len(data)):
#     if i == 0:
#         # replace the original header with duration column names
#         g.write("al_time\t" +
#                 "dcf_time\t" +
#                 "train_time\t" +
#                 "al_sample_time\t" +
#                 "al_preproc_time\t" +
#                 "train_extract_time\t" +
#                 "train_preproc_time\t" +
#                 "train_forecast_time\n")
#         continue
#     l = data[i].split("\t")
#     a = json.loads(l[-1])  # the last tab-separated field is a JSON blob with a 'time' dict
#     print(a['time'])
#     al_time = a['time']["al_end"] - a['time']["al_start"]
#     dcf_time = a['time']["dcf_finish"] - a['time']["dcf_create"]
#     train_time = a['time']["train_end"] - a['time']["train_start"]
#     al_sample_time = a['time']["al_sample_end"] - a['time']["al_sample_start"]
#     al_preproc_time = a['time']["al_preproc_end"] - a['time']["al_preproc_start"]
#     train_extract_time = a['time']["train_extract_end"] - a['time']["train_extract_start"]
#     train_preproc_time = a['time']["train_preproc_end"] - a['time']["train_preproc_start"]
#     train_forecast_time = a['time']["train_forecast_end"] - a['time']["train_forecast_start"]
#     # print(al_time)
#     # print(dcf_time)
#     g.write(str(al_time) + "\t" +
#             str(dcf_time) + "\t" +
#             str(train_time) + "\t" +
#             str(al_sample_time) + "\t" +
#             str(al_preproc_time) + "\t" +
#             str(train_extract_time) + "\t" +
#             str(train_preproc_time) + "\t" +
#             str(train_forecast_time) + "\n")
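# A minimal, self-contained sketch of the extraction above on one synthetic
# record (hypothetical epoch timestamps; the 'time' keys mirror the demo):
# import json
# line = "id123\t" + json.dumps({"time": {"al_start": 100, "al_end": 160,
#                                         "train_start": 200, "train_end": 230}})
# rec = json.loads(line.split("\t")[-1])
# al_time = rec["time"]["al_end"] - rec["time"]["al_start"]
# train_time = rec["time"]["train_end"] - rec["time"]["train_start"]
# print(al_time, train_time)  # 60 30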
"""demo11 robot項(xiàng)目 pd.to_numeric
"""
import numpy as np
# tr_data = pd.read_csv("202011_.txt", sep="\t")
# print(tr_data.shape)
# #(599011, 4)
# print(tr_data.columns)
#Index(['business_id', 'json_extract(data,'$.extra.media_id')',
# 'json_extract(data,'$.extra.title')',
# 'json_extract(data,'$.extra.content_html')'],
# dtype='object')
# # # Step 1: deduplicate
# tr_data.drop_duplicates(subset=['business_id'],keep='first',inplace=True)
# print(tr_data.shape) #(420877, 4)
# # # Step 2: drop rows with missing values
# tr_data = tr_data.dropna()
# print(tr_data.shape) #(420843, 4)
# # tr_data.to_csv("202011.txt", sep="\t", index=None)
# # print("=====================================")
# # Step 3: rename the columns
# tr_data.columns = ['cmsid', 'media_id', 'title', 'content']
# # tr_data_ = tr_data['media_id'].apply(lambda x: x if re.search("^\d+$",str(x)) else np.nan)
# # tr_data = tr_data.dropna()
# # print(tr_data.shape)
# # Step 4: drop rows where 'media_id' is not numeric
# df = tr_data[pd.to_numeric(tr_data['media_id'], errors='coerce').notnull()]
# # print(df.shape)
# df.to_csv("20210103", sep="\t", index=None)
# df = pd.read_csv("20210103",sep="\t")
# df = df.iloc[:5000]
# df.to_csv("20210112",index=None,sep="\t")
"""demo12 替換
"""
# import pandas as pd
# df = pd.read_csv("./dt_new_2",sep="\t")
# # print(df['label'].value_counts())
# df['label'] = df['label'].replace('serious', 'light')  # assignment form; column-level inplace replace can trigger SettingWithCopyWarning
# df.to_csv("./dt_3",sep="\t")
"""demo13 shuffle
"""
# Method 1: shuffle the raw lines, keeping the header line first
# import random
# with open("new_dedup","r",encoding="utf-8") as f:
# data = f.readlines()
# k = data[0]
# print(type(k))
# l = data[1:]
# print(type(l))
# random.shuffle(l)
# str_data = "".join(l)
# with open("new","w",encoding="utf-8") as f:
# f.write(k)
# f.write(str_data)
# Method 2: shuffle with pandas via sample(frac=1)
# import pandas as pd
# df = pd.read_csv("robot_sql", sep="\t")
# df = df.sample(frac=1)
# df.to_csv("00k",sep="\t")
"""demo14 不放回sample
"""
# import pandas as pd
# tr_data = pd.read_csv("./train_random5863", sep="\t") #12863
# new = tr_data.sample(3863, replace=False)
# new.to_csv("train_random3863", sep="\t", index=None)
"""demo15 random.shuffle
"""
# import random
# import time
# random.seed(time.time())  # seed from the current timestamp so each run uses a different seed
# lst = [1,2,3,4,5,6,7,8]
# random.shuffle(lst)
# print(lst)
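# A minimal seeding sketch: a fixed seed makes the shuffle reproducible;
# note that random.seed() with no argument already seeds from the system
# (OS entropy or time), so the explicit time.time() call above is usually redundant:
# import random
# lst = [1, 2, 3, 4, 5]
# random.seed(42)
# random.shuffle(lst)
# print(lst)  # same order on every run with seed 42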
"""demo16
"""