Using Spark SQL's INSERT to write to Kudu

You can also write to a Kudu table directly with a Spark SQL INSERT statement. As with 'append' mode, the INSERT statement is in fact handled with UPSERT semantics by default: rows whose primary key already exists are updated rather than rejected.
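
For comparison, the same UPSERT-style write can be expressed through the DataFrame API in 'append' mode. A minimal sketch, reusing the srcTableDF and kuduOptions defined in the full example below:

import org.apache.kudu.spark.kudu._

// "append" is the save mode kudu-spark expects for writes; rows are applied as UPSERTs
srcTableDF.write.options(kuduOptions).mode("append").kudu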

import org.apache.kudu.spark.kudu._
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SparkSession

/**
  * Created by angel;
  */
// Case class backing the source DataFrame; it must be defined at top level
// (outside main) so that toDF() can derive its schema
case class Customer(name: String, age: Int, city: String)

object SparkSQL_insert {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("AcctfileProcess")
      //set the master URL and tune Spark parameters
      .setMaster("local")
      .set("spark.worker.timeout", "500")
      .set("spark.cores.max", "10")
      .set("spark.rpc.askTimeout", "600s")
      .set("spark.network.timeout", "600s")
      .set("spark.task.maxFailures", "1")
      .set("spark.speculationfalse", "false")
      .set("spark.driver.allowMultipleContexts", "true")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sparkContext = SparkContext.getOrCreate(sparkConf)
    val sqlContext = SparkSession.builder().config(sparkConf).getOrCreate().sqlContext
    //TODO 1: define the table name and Kudu master addresses
    val kuduTableName = "spark_kudu_tbl"
    val kuduMasters = "hadoop01:7051,hadoop02:7051,hadoop03:7051"
    //create a KuduContext; the Kudu table itself must already exist (see the creation sketch after the listing)
    val kuduContext = new KuduContext(kuduMasters, sqlContext.sparkContext)
    //TODO 2: prepare the source data
    val srcTableData = Array(
      Customer("enzo", 43, "oakland"),
      Customer("laura", 27, "vancouver"))
    import sqlContext.implicits._
    //TODO 3: configure the Kudu options
    val kuduOptions: Map[String, String] = Map(
      "kudu.table"  -> kuduTableName,
      "kudu.master" -> kuduMasters)
    //TODO 4: create the DataFrame
    val srcTableDF = sparkContext.parallelize(srcTableData).toDF()

    //TODO 5: register the source DataFrame as a temp view
    srcTableDF.createOrReplaceTempView("source_table")

    //TODO 6: register the Kudu table as a temp view under the same name
    sqlContext.read.options(kuduOptions).kudu.createOrReplaceTempView(kuduTableName)

    //TODO 7: insert the data with Spark SQL's INSERT (applied as UPSERTs)
    sqlContext.sql(s"INSERT INTO TABLE $kuduTableName SELECT * FROM source_table")

    //TODO 8: read the table back to verify the write
    sqlContext.read.options(kuduOptions).kudu.show()
  }
}
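
The example assumes that spark_kudu_tbl already exists in Kudu. If it does not, the KuduContext created above can build it before the INSERT runs. A minimal sketch, assuming name as the primary key, 3 hash buckets, and 3 replicas (all illustrative choices, not requirements):

import org.apache.kudu.client.CreateTableOptions
import org.apache.spark.sql.types._
import scala.collection.JavaConverters._

if (!kuduContext.tableExists(kuduTableName)) {
  // Kudu requires a non-nullable primary key; "name" is chosen here for illustration
  val schema = StructType(List(
    StructField("name", StringType, nullable = false),
    StructField("age",  IntegerType, nullable = true),
    StructField("city", StringType, nullable = true)))
  kuduContext.createTable(kuduTableName, schema, Seq("name"),
    new CreateTableOptions()
      .addHashPartitions(List("name").asJava, 3)
      .setNumReplicas(3))
}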
Original source: https://www.cnblogs.com/niutao/p/10555356.html