Spark Data Modeling Preparation
Deduplication
# Initialize Spark
from pyspark.sql import SparkSession
spark = SparkSession.builder.master("local[*]").appName("shuangyu").getOrCreate()
df = spark.createDataFrame([(1, 144.5, 5.9, 33, 'M'),
                            (2, 167.2, 5.4, 45, 'M'),
                            (3, 124.1, 5.2, 23, 'F'),
                            (4, 144.5, 5.9, 33, 'M'),
                            (5, 133.2, 5.7, 54, 'F'),
                            (3, 124.1, 5.2, 23, 'F'),
                            (5, 129.2, 5.3, 42, 'M')],
                           ["id", "weight", "height", "age", "gender"])
# Print the total number of rows and the number of distinct rows
print("count of rows: {}".format(df.count()))
print("count of distinct rows: {}".format(df.distinct().count()))
count of rows: 7
count of distinct rows: 6
# Drop the duplicate rows
df = df.dropDuplicates()
df.show()
+---+------+------+---+------+
| id|weight|height|age|gender|
+---+------+------+---+------+
| 5| 133.2| 5.7| 54| F|
| 5| 129.2| 5.3| 42| M|
| 1| 144.5| 5.9| 33| M|
| 4| 144.5| 5.9| 33| M|
| 2| 167.2| 5.4| 45| M|
| 3| 124.1| 5.2| 23| F|
+---+------+------+---+------+
# Check whether any rows are duplicated once the id column is excluded
print("counts of ids: {}".format(df.count()))
print("counts of distinct ids: {}".format(df.select([c for c in df.columns if c != "id"]).distinct().count()))
counts of ids: 6
counts of distinct ids: 5
# Two rows are identical except for their IDs; drop one of them
df = df.dropDuplicates(subset = [c for c in df.columns if c != "id"])
df.show()
+---+------+------+---+------+
| id|weight|height|age|gender|
+---+------+------+---+------+
| 5| 133.2| 5.7| 54| F|
| 1| 144.5| 5.9| 33| M|
| 2| 167.2| 5.4| 45| M|
| 3| 124.1| 5.2| 23| F|
| 5| 129.2| 5.3| 42| M|
+---+------+------+---+------+
# OK, now check whether any IDs are duplicated
import pyspark.sql.functions as fn  # import the Spark SQL functions
df.agg(fn.count("id").alias("count"),
       fn.countDistinct("id").alias("distinct")).show()
+-----+--------+
|count|distinct|
+-----+--------+
| 5| 4|
+-----+--------+
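countDistinct computes an exact distinct count, which can get expensive on large data. As an aside (not part of the original walkthrough), pyspark.sql.functions also offers an approximate variant; a minimal sketch, assuming Spark 2.1+ where it is named approx_count_distinct:
# trades a small relative error (rsd, default 0.05) for far less work
df.agg(fn.approx_count_distinct("id").alias("approx_distinct")).show()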
# There are duplicate IDs; assign each row a new unique ID to identify it
df.withColumn("newId",fn.monotonically_increasing_id()).show()
# withColumn adds a new column
# monotonically_increasing_id generates IDs that are guaranteed unique and increasing,
# but not consecutive (note the gaps in the newId column below)
+---+------+------+---+------+-------------+
| id|weight|height|age|gender| newId|
+---+------+------+---+------+-------------+
| 5| 133.2| 5.7| 54| F| 25769803776|
| 1| 144.5| 5.9| 33| M| 171798691840|
| 2| 167.2| 5.4| 45| M| 592705486848|
| 3| 124.1| 5.2| 23| F|1236950581248|
| 5| 129.2| 5.3| 42| M|1365799600128|
+---+------+------+---+------+-------------+
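If strictly consecutive IDs are needed instead, one alternative (a sketch, not from the original text) is row_number over a window; note that ordering a window without partitionBy pulls every row into a single partition, so this only suits small data:
from pyspark.sql import Window
# assigns consecutive IDs 1..n, ordered by the existing id column
df.withColumn("newId", fn.row_number().over(Window.orderBy("id"))).show()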
Missing Data
df_miss = spark.createDataFrame([(1, 143.5, 5.6, 28, 'M', 10000),
                                 (2, 167.2, 5.4, 45, 'M', None),
                                 (3, None, 5.2, None, None, None),
                                 (4, 144.5, 5.9, 33, 'M', None),
                                 (5, 133.2, 5.7, 54, 'F', None),
                                 (6, 124.1, 5.2, None, 'F', None),
                                 (7, 129.2, 5.3, 42, 'M', 76000)],
                                ['id', 'weight', 'height', 'age', 'gender', 'income'])
# Count the missing values in each row
df_miss.rdd.map(lambda row: (row['id'], sum([c is None for c in row]))).collect()
[(1, 0), (2, 1), (3, 4), (4, 1), (5, 1), (6, 2), (7, 0)]
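The same per-row tally can also stay in the DataFrame API rather than dropping to the RDD; a minimal equivalent sketch:
from functools import reduce
# one 0/1 indicator column per field, summed across the row
missing_cnt = reduce(lambda a, b: a + b,
                     [fn.isnull(c).cast("int") for c in df_miss.columns])
df_miss.select("id", missing_cnt.alias("missing")).show()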
# The row with id 3 is missing quite a lot of values; take a look at it
df_miss.where('id == 3').show()
+---+------+------+----+------+------+
| id|weight|height| age|gender|income|
+---+------+------+----+------+------+
| 3| null| 5.2|null| null| null|
+---+------+------+----+------+------+
# Compute the fraction of missing values in each column
df_miss.agg(*[(1-(fn.count(c)/fn.count('*'))).alias(c + "_miss") for c in df_miss.columns]).show()
+-------+------------------+-----------+------------------+------------------+------------------+
|id_miss| weight_miss|height_miss| age_miss| gender_miss| income_miss|
+-------+------------------+-----------+------------------+------------------+------------------+
| 0.0|0.1428571428571429| 0.0|0.2857142857142857|0.1428571428571429|0.7142857142857143|
+-------+------------------+-----------+------------------+------------------+------------------+
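To see absolute missing counts instead of fractions, the same aggregation can sum null indicators; a small variant sketch:
df_miss.agg(*[fn.sum(fn.isnull(c).cast("int")).alias(c + "_miss_cnt")
              for c in df_miss.columns]).show()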
# The income column has too many missing values to be useful; drop it
df_miss_no_income = df_miss.select([c for c in df_miss.columns if c != "income"])
df_miss_no_income.show()
+---+------+------+----+------+
| id|weight|height| age|gender|
+---+------+------+----+------+
| 1| 143.5| 5.6| 28| M|
| 2| 167.2| 5.4| 45| M|
| 3| null| 5.2|null| null|
| 4| 144.5| 5.9| 33| M|
| 5| 133.2| 5.7| 54| F|
| 6| 124.1| 5.2|null| F|
| 7| 129.2| 5.3| 42| M|
+---+------+------+----+------+
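DataFrame.drop does the same thing more directly; an equivalent one-liner:
# equivalent to the select above
df_miss.drop("income").show()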
# Some rows are also missing quite a lot of data; drop those rows
# thresh=3 drops any row with fewer than 3 non-null values
df_miss_no_income.dropna(thresh=3).show()
+---+------+------+----+------+
| id|weight|height| age|gender|
+---+------+------+----+------+
| 1| 143.5| 5.6| 28| M|
| 2| 167.2| 5.4| 45| M|
| 4| 144.5| 5.9| 33| M|
| 5| 133.2| 5.7| 54| F|
| 6| 124.1| 5.2|null| F|
| 7| 129.2| 5.3| 42| M|
+---+------+------+----+------+
# Drop any row that contains a null
df_miss_no_income.dropna().show()
+---+------+------+---+------+
| id|weight|height|age|gender|
+---+------+------+---+------+
| 1| 143.5| 5.6| 28| M|
| 2| 167.2| 5.4| 45| M|
| 4| 144.5| 5.9| 33| M|
| 5| 133.2| 5.7| 54| F|
| 7| 129.2| 5.3| 42| M|
+---+------+------+---+------+
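dropna also accepts a subset argument when only certain columns should decide; a small sketch (not part of the original walkthrough):
# drop only rows whose age is null, ignoring nulls in other columns
df_miss_no_income.dropna(subset=["age"]).show()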
# Fill the nulls with replacement values
means = df_miss_no_income.agg(*[fn.mean(c).alias(c)
                                for c in df_miss_no_income.columns
                                if c != 'gender'])\
                         .toPandas().to_dict('records')[0]
means['gender'] = "missing"
print(means)
{'age': 40.4, 'height': 5.471428571428571, 'gender': 'missing', 'weight': 140.28333333333333, 'id': 4.0}
# df.fillna(dict) fills the nulls in df; the dict maps each column name to the value to fill in
df_miss_no_income.fillna(means).show()
+---+------------------+------+---+-------+
| id| weight|height|age| gender|
+---+------------------+------+---+-------+
| 1| 143.5| 5.6| 28| M|
| 2| 167.2| 5.4| 45| M|
| 3|140.28333333333333| 5.2| 40|missing|
| 4| 144.5| 5.9| 33| M|
| 5| 133.2| 5.7| 54| F|
| 6| 124.1| 5.2| 40| F|
| 7| 129.2| 5.3| 42| M|
+---+------------------+------+---+-------+
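For mean or median imputation, Spark 2.2+ also ships pyspark.ml.feature.Imputer; a minimal sketch assuming Spark >= 2.2 (Imputer wants floating-point inputs, hence the cast of age):
from pyspark.ml.feature import Imputer
# cast age to double because Imputer rejects integer input columns
df_num = df_miss_no_income.withColumn("age", df_miss_no_income["age"].cast("double"))
imputer = Imputer(strategy="mean",
                  inputCols=["weight", "age"],
                  outputCols=["weight_imp", "age_imp"])
imputer.fit(df_num).transform(df_num).show()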
Outliers
df_outliers = spark.createDataFrame([(1, 143.5, 5.3, 28),
                                     (2, 154.2, 5.5, 45),
                                     (3, 342.3, 5.1, 99),
                                     (4, 144.5, 5.5, 33),
                                     (5, 133.2, 5.4, 54),
                                     (6, 124.1, 5.1, 21),
                                     (7, 129.2, 5.3, 42)],
                                    ["id", "weight", "height", "age"])
cols = ["weight","height","age"]
# bounds will store the computed cutoff values for each column
bounds = {}
for col in cols:
    # Compute the first and third quartiles (Q1 and Q3)
    quantiles = df_outliers.approxQuantile(col, [0.25, 0.75], 0.05)
    # Interquartile range
    IQR = quantiles[1] - quantiles[0]
    # Inner fences: Q1 - 1.5*IQR and Q3 + 1.5*IQR
    bounds[col] = [quantiles[0] - 1.5 * IQR, quantiles[1] + 1.5 * IQR]
print("bounds: ", bounds)
# Flag values outside the inner fences as outliers
outliers = df_outliers.select(*['id'] +
                              [((df_outliers[c] < bounds[c][0]) |
                                (df_outliers[c] > bounds[c][1])).alias(c + "_o")
                               for c in cols])
outliers.show()
bounds: {'age': [-11.0, 93.0], 'height': [4.499999999999999, 6.1000000000000005], 'weight': [91.69999999999999, 191.7]}
+---+--------+--------+-----+
| id|weight_o|height_o|age_o|
+---+--------+--------+-----+
| 1| false| false|false|
| 2| false| false|false|
| 3| true| false| true|
| 4| false| false|false|
| 5| false| false|false|
| 6| false| false|false|
| 7| false| false|false|
+---+--------+--------+-----+
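As an aside, since Spark 2.2 approxQuantile also accepts a list of columns, so the per-column loop above collapses into a single pass; a sketch assuming Spark >= 2.2:
# one call returns [[Q1, Q3], ...] in the same order as cols
quantiles = df_outliers.approxQuantile(cols, [0.25, 0.75], 0.05)
bounds = {c: [q1 - 1.5 * (q3 - q1), q3 + 1.5 * (q3 - q1)]
          for c, (q1, q3) in zip(cols, quantiles)}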
# Pull out the rows with outlier values
df_outliers = df_outliers.join(outliers, on='id')
# Do not write this join as df_outliers.join(outliers, df_outliers.id == outliers.id);
# the result would then carry two id columns, and a later select would fail with
# AnalysisException: "Reference 'id' is ambiguous"
df_outliers.show()
+---+------+------+---+--------+--------+-----+
| id|weight|height|age|weight_o|height_o|age_o|
+---+------+------+---+--------+--------+-----+
| 7| 129.2| 5.3| 42| false| false|false|
| 6| 124.1| 5.1| 21| false| false|false|
| 5| 133.2| 5.4| 54| false| false|false|
| 1| 143.5| 5.3| 28| false| false|false|
| 3| 342.3| 5.1| 99| true| false| true|
| 2| 154.2| 5.5| 45| false| false|false|
| 4| 144.5| 5.5| 33| false| false|false|
+---+------+------+---+--------+--------+-----+
df_outliers.filter('weight_o').select('id','weight').show()
+---+------+
| id|weight|
+---+------+
| 3| 342.3|
+---+------+
df_outliers.filter("age_o").select("id","age").show()
+---+---+
| id|age|
+---+---+
| 3| 99|
+---+---+
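To drop the flagged rows altogether rather than just inspecting them, negate the combined flags; a closing sketch (not in the original):
# keep only rows where no column was flagged as an outlier
df_clean = df_outliers.filter(~fn.col("weight_o") & ~fn.col("height_o") & ~fn.col("age_o"))
df_clean.select("id", "weight", "height", "age").show()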