Get the quantiles of a column
# Python:
df.approxQuantile("x", [0.5], 0.25)
// Scala:
df.stat.approxQuantile("x", Array(0.5), 0.25)
The first argument is the column name, the second is the list of quantiles to compute, and the third is the relative error; 0 means an exact computation with no error, at the cost of a full scan.
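approxQuantile also accepts several probabilities in one call, which is much cheaper than repeated scans. A minimal sketch (the 0.01 relative error is an arbitrary choice):
// Scala: quartiles in a single pass; returns an Array[Double] of the same length as the probabilities
val Array(q25, q50, q75) = df.stat.approxQuantile("x", Array(0.25, 0.5, 0.75), 0.01)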
Get the rows that appear in a_df but not in b_df
a_df.except(b_df)
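Note that except behaves like SQL EXCEPT DISTINCT, so duplicates in a_df collapse to one row. A toy sketch of the behavior (assumes spark.implicits._ is in scope):
val a_df = Seq(1, 1, 2, 3).toDF("x")
val b_df = Seq(3).toDF("x")
a_df.except(b_df).show()  // yields 1 and 2 once each; use exceptAll (Spark 2.4+) to keep duplicates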
Parse a JSON array string in Spark
// Suppose the string looks like this:
import spark.implicits._
import org.apache.spark.sql.types._
import org.apache.spark.sql.functions._
val egDF = Seq("""[{"text": "哈哈哈哈哈", "no": "hehe"},{"text": "嘻嘻嘻嘻", "no": "2"}]""",
"""[{"text": "噢噢噢噢", "no": "1"},{"text": "嘎嘎嘎嘎", "no": "2"}]""").toDF("value")
val schema = ArrayType(StructType(
Seq(StructField("text", StringType),StructField("no", StringType))
))
val jsonDF = egDF.withColumn("text_no_arr", from_json(col("value"), schema)).
withColumn("text_no", explode(col("text_no_arr"))).select(col("text_no")("text"))
// Example 2: one JSON object per row
val egDF = Seq("""{"text": "哈哈哈哈哈", "no": "hehe"}""",
"""{"text": "噢噢噢噢", "no": "1"}""").toDF("value")
val schema = StructType(
Seq(StructField("text", StringType),StructField("no", StringType))
)
val jsonDF = egDF.withColumn("text_no_arr", from_json(col("value"), schema)).
select(col("text_no_arr")("text"))
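If you only need a field or two and don't want to maintain a schema by hand, get_json_object is a lighter-weight alternative (a sketch; the JSONPath strings are the only new inputs):
val lightDF = egDF.select(
  get_json_object(col("value"), "$.text").alias("text"),
  get_json_object(col("value"), "$.no").alias("no"))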
When Spark SQL reads Hive tables you often run into struct-typed columns, which are awkward for downstream processing. Below is an example of converting a struct column into a user-defined case class.
// Turn a struct-typed field into a case class
import spark.implicits._
case class Location(lat: Double, lon: Double)
val eg_df = Seq((10, Location(35, 25)), (20, Location(45, 35))).toDF
// Approach 1: pattern match on the Row
val eg_ds = eg_df.map { row =>
  (row: @unchecked) match {
    case Row(a: Int, Row(b: Double, c: Double)) => (a, Location(b, c))
  }
}
// Approach 2: positional getters
val eg_ds2 = eg_df.map(row => {
(row.getInt(0), Location(row.getStruct(1).getDouble(0), row.getStruct(1).getDouble(1)))
})
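A third option worth knowing: when the schema already lines up with the target type, the built-in encoders can convert directly. Here the default tuple columns _1/_2 produced by toDF match the tuple encoder, and the struct fields lat/lon match Location:
// Approach 3: encoder-based conversion, no manual Row handling
val eg_ds3 = eg_df.as[(Int, Location)]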
Add an auto-incrementing id column in Spark, using the RDD zipWithIndex method
// Imports needed for this snippet
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{LongType, StructField, StructType}
// Add an "id" field on top of the original schema
val schema: StructType = dataframe.schema.add(StructField("id", LongType))
// Convert the DataFrame to an RDD and call zipWithIndex
val dfRDD: RDD[(Row, Long)] = dataframe.rdd.zipWithIndex()
val rowRDD: RDD[Row] = dfRDD.map(tp => Row.merge(tp._1, Row(tp._2)))
// Convert the indexed RDD back into a DataFrame
val df2 = spark.createDataFrame(rowRDD, schema)
df2.show()
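If the ids only need to be unique rather than consecutive, the built-in monotonically_increasing_id avoids the RDD round trip entirely (ids increase within each partition but have gaps between partitions):
import org.apache.spark.sql.functions.monotonically_increasing_id
val df3 = dataframe.withColumn("id", monotonically_increasing_id())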
Convert between a Scala case class and a JSON string
import com.google.gson.Gson

case class DeviceSeqObj(device_id: String,
                        text_id_click_seq: String,
                        text_tag_click_seq: String)
val pushClickSeqFeatureDF = pushClickSeqFeature(jobConfig, spark).as[DeviceSeqObj].
  map(row => {
    val device_id = row.device_id
    val gson = new Gson
    val device_push_seq_feature = gson.toJson(row, classOf[DeviceSeqObj])
    (device_id, device_push_seq_feature)
  }).toDF("device_id", "feature")
val df = pushClickSeqFeatureDF.map(row => {
  val feature = row.getString(row.fieldIndex("feature"))
  val gson = new Gson
  val device_push_seq_feature = gson.fromJson(feature, classOf[DeviceSeqObj])
  device_push_seq_feature
})
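A quick driver-side round trip of the two directions above (a sketch; the field values are made up). Gson instances are not serializable, which is why the original code constructs one inside each map closure instead of sharing it:
val gson = new Gson
val obj = DeviceSeqObj("d1", "t1,t2", "tagA,tagB")
val json = gson.toJson(obj, classOf[DeviceSeqObj])
assert(gson.fromJson(json, classOf[DeviceSeqObj]).device_id == "d1")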
Spark DAG optimization may reorder execution
val opPushDF = deviceInfo.join(broadcast(opTextDS), "explore").
  withColumn("isSelected", isSelectedUDF(col("flow_ratio"))).
  where(col("isSelected") === 1)
Because the only column the UDF depends on comes from opTextDS, Catalyst pushes the filter below the join: a subset of rows is selected from opTextDS first, and only then joined with deviceInfo. What we actually want is a random selection over each device + text pair. In that case you can pass in a redundant column from deviceInfo, which forces the UDF to run after the join:
val opPushDF = deviceInfo.join(broadcast(opTextDS), "explore").
  withColumn("isSelected", isSelectedUDF(col("device_id"), col("flow_ratio"))).
  where(col("isSelected") === 1)
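Since Spark 2.3 there is also a more direct fix: mark the UDF as non-deterministic so Catalyst will not push it below the join. A sketch, with a made-up selection rule standing in for the real one:
import org.apache.spark.sql.functions.udf
import scala.util.Random
// hypothetical logic: keep each row with probability flow_ratio
val isSelectedUDF = udf((flowRatio: Double) => if (Random.nextDouble() < flowRatio) 1 else 0).
  asNondeterministic()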