Solution: read the target table's column metadata from the Spark catalog with spark.catalog.listColumns, collect it to the driver, then compare each column's name and data type against the source schema map, throwing an exception on any mismatch:
val targetTableSchemaArray = spark.catalog.listColumns(dbName, tableName)
  .select("name", "dataType", "isPartition", "isBucket")
  .rdd.map(row => {
    // Pull each column's metadata out of the catalog Row with typed accessors.
    val name = row.getAs[String]("name")
    val typeName = row.getAs[String]("dataType")
    val isPartition = row.getAs[Boolean]("isPartition")
    val isBucket = row.getAs[Boolean]("isBucket")
    (name, typeName, isPartition, isBucket)
  }).collect()
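Each element of targetTableSchemaArray is a (name, dataType, isPartition, isBucket) tuple; the validation loop below uses only the name and the type.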
targetTableSchemaArray.foreach { case (name, typeName, _, _) =>
  // Check that each column's name and type are consistent between the source
  // and target tables; if not, throw an exception.
  val sourceTypeName = sourceSchemaMap.getOrElse(name, "None")
  sourceTypeName match {
    case "None" =>
      throw new Exception(s"Column ${name} does not exist in the source table")
    // Backticks match against the value of the outer typeName; a bare
    // `case typeName` would bind a fresh variable and match everything,
    // making the mismatch branch below unreachable.
    case `typeName` => println(s"Column ${name}: type matches")
    case _ =>
      throw new Exception(s"Inconsistent column types, details: spark -> ${name}:${sourceTypeName} \t hive -> ${name}:${typeName}")
  }
}
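The snippet assumes a sourceSchemaMap from column name to type string but never shows how it is built. A minimal sketch of one way to construct it, assuming the source table is loaded as a DataFrame named sourceDF (the variable name and the table identifiers are assumptions, not from the original):

// Map each source column name to its catalog type string (e.g. "int", "string"),
// which generally matches the dataType strings returned by spark.catalog.listColumns.
val sourceDF = spark.table(s"$sourceDbName.$sourceTableName") // assumed identifiers
val sourceSchemaMap: Map[String, String] =
  sourceDF.schema.fields.map(f => f.name -> f.dataType.catalogString).toMap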