Spark Streaming with MySQL Example

Prerequisites: start ZooKeeper and Kafka, and create a topic named wc.
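
The topic can be created with the scripts bundled with Kafka. The commands below are a minimal sketch assuming a single local broker and ZooKeeper on localhost:2181; the partition count of 3 is illustrative and not taken from the original setup:

bin/zookeeper-server-start.sh config/zookeeper.properties
bin/kafka-server-start.sh config/server.properties
bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 3 --topic wc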

1. MysqlConnectPool.scala

package sparkstreaming

import java.sql.{Connection, DriverManager, ResultSet, Statement}
import java.text.SimpleDateFormat
import java.util.Date


/**
  * @author yangwj
  * @date 2020/8/5 10:25
  */
class MysqlConnectPool {

  private var connection: Connection = _

  private val driver = "com.mysql.jdbc.Driver"
  private val url = "jdbc:mysql://localhost:3306/spark?useUnicode=true&characterEncoding=utf-8&useSSL=false&autoReconnect=true"
  private val username = "root"
  private val password = "yang156122"

  /** Create the MySQL connection lazily and reuse it. @return the shared Connection */
  def conn(): Connection = {
    if (connection == null) {
      println(this.driver)
      Class.forName(this.driver)
      connection = DriverManager.getConnection(this.url, this.username, this.password)
    }
    connection
  }

  // Close the statement and the connection
  def close(conn: Connection,stat: Statement): Unit = {
    try {
      if (stat != null && !stat.isClosed()) {
        stat.close()
      }
      if (conn != null && !conn.isClosed()) {
        conn.close()
      }

    } catch {
        case ex: Exception => {
              ex.printStackTrace()
      }
    }
  }

  // Current time as a "yyyy-MM-dd HH:mm:ss" string
  def currentTime():String = {
    val date = new Date()
    val time: String = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(date)
    time
  }

  // Insert a new row for the given field and count
  def insert(stat: Statement,field:String,times:Int):Int = {
    try {
      val result: Int = stat.executeUpdate(s"INSERT INTO `spark_test`( `create_time`,`field`, `times`) VALUES ( '${currentTime()}','${field}', ${times})")
      println(s"Insert returned => $result")
      result
    }catch {
      case ex: Exception => {
        ex.printStackTrace() // print the stack trace to stderr
        System.err.println("exception===>: ...")  // print to stderr
        0
      }
    }
  }

  // Delete rows matching the given field
  def delete(stat: Statement,field:String) = {
    try {
      val result: Int = stat.executeUpdate(s"DELETE FROM `spark_test` WHERE `field` = '${field}'")
      println(s"Delete returned => $result")
    }catch {
      case ex: Exception => {
        ex.printStackTrace() // print the stack trace to stderr
        System.err.println("exception===>: ...")  // print to stderr
      }
    }
  }

  // Query rows by field
  def selectByField(stat: Statement,field:String):ResultSet = {
    try {
      val result= stat.executeQuery(s"select * FROM `spark_test` WHERE `field` = '${field}'")
      println(s"Query executed for field => ${field}")
      result
    }catch {
      case ex: Exception => {
        ex.printStackTrace() // print the stack trace to stderr
        System.err.println("exception===>: ...")  // print to stderr
        null // TODO: improve error handling
      }
    }
  }

  // Update the count for an existing field
  def update(stat: Statement,field:String,times:Int): Unit = {
    try {
      val resUpdate = stat.executeUpdate(s"UPDATE `spark_test` SET `times` = ${times} WHERE `field` = '${field}'")
      println(s"Update returned => $resUpdate")
    }catch {
      case ex: Exception => {
        ex.printStackTrace() // print the stack trace to stderr
        System.err.println("exception===>: ...")  // print to stderr
      }
    }
  }

}
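
The class above assumes a MySQL table named spark_test. The snippet below is a minimal, standalone usage sketch; the CREATE TABLE statement is an assumed schema inferred from the INSERT statement above (the original table definition is not shown), so adjust the column types to your database.

package sparkstreaming

import java.sql.{Connection, ResultSet, Statement}

object MysqlConnectPoolDemo {
  def main(args: Array[String]): Unit = {
    val pool = new MysqlConnectPool
    val conn: Connection = pool.conn()
    val stat: Statement = conn.createStatement()

    // Assumed schema for the demo; skip this if the table already exists
    stat.executeUpdate(
      """CREATE TABLE IF NOT EXISTS `spark_test` (
        |  `id` INT PRIMARY KEY AUTO_INCREMENT,
        |  `create_time` DATETIME,
        |  `field` VARCHAR(255),
        |  `times` INT
        |) DEFAULT CHARSET = utf8""".stripMargin)

    // The same insert-or-accumulate flow that the streaming job uses below
    val word = "hello"
    val count = 3
    val rs: ResultSet = pool.selectByField(stat, word)
    if (rs != null && rs.next()) {
      pool.update(stat, word, count + rs.getInt("times"))
    } else {
      pool.insert(stat, word, count)
    }

    pool.close(conn, stat)
  }
}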

2. KafkaDirectWordCountV3.scala

package sparkstreaming

import java.sql.{Connection, ResultSet, Statement}

import kafka.common.TopicAndPartition
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import kafka.utils.{ZKGroupTopicDirs, ZkUtils}
import org.I0Itec.zkclient.ZkClient
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaUtils, OffsetRange}
import org.apache.spark.streaming.{Duration, StreamingContext}


object KafkaDirectWordCountV3 {

  def main(args: Array[String]): Unit = {

    // Consumer group name
    val group = "g001"
    // Create the SparkConf
    val conf = new SparkConf().setAppName("KafkaDirectWordCount").setMaster("local[2]")
    // Create the StreamingContext with a 5-second batch interval
    val ssc = new StreamingContext(conf, Duration(5000))
    // Name of the topic to consume
    val topic = "wc"
    // Kafka broker address (the Spark Streaming tasks connect directly to the Kafka partitions and consume with the lower-level API, which is more efficient)
    val brokerList = "localhost:9092"

    // ZooKeeper address, used later to update the consumed offsets (Redis or MySQL could also be used to store offsets)
    val zkQuorum = "localhost:2181"
    // Set of topic names used when creating the stream; Spark Streaming can consume several topics at once
    val topics: Set[String] = Set(topic)

    // Create a ZKGroupTopicDirs object; it defines the ZooKeeper directory where the offsets are saved
    val topicDirs = new ZKGroupTopicDirs(group, topic)
    // The resulting ZooKeeper path, e.g. "/g001/offsets/wc"
    val zkTopicPath = s"${topicDirs.consumerOffsetDir}"

    // Kafka parameters
    val kafkaParams = Map(
      //"deserializer.encoding" -> "GBK",
      "metadata.broker.list" -> brokerList,
      "group.id" -> group,
      // start reading from the earliest available offset
      "auto.offset.reset" -> kafka.api.OffsetRequest.SmallestTimeString
    )

    // ZooKeeper host and port; create a ZooKeeper client used to read and update the stored offsets
    val zkClient = new ZkClient(zkQuorum)

    // Check whether this path already has child nodes (they exist if offsets were saved for the partitions before), e.g.:
    // /g001/offsets/wc/0/10001
    // /g001/offsets/wc/1/30001
    // /g001/offsets/wc/2/10001
    // zkTopicPath -> /g001/offsets/wc
    val children = zkClient.countChildren(zkTopicPath)

    var kafkaStream: InputDStream[(String, String)] = null

    // If offsets were saved in ZooKeeper, use them as the starting position of kafkaStream
    var fromOffsets: Map[TopicAndPartition, Long] = Map()

    // If offsets were saved before
    // Note: the offset lookup runs on the Driver
    if (children > 0) {
      for (i <- 0 until children) {
        // e.g. /g001/offsets/wc/0/10001
        println(s"path: ${zkTopicPath}")
        // read the saved offset for partition i, e.g. from /g001/offsets/wc/0
        val partitionOffset = zkClient.readData[String](s"$zkTopicPath/${i}")
        // wc/0
        val tp = TopicAndPartition(topic, i)
        // add each partition's offset to fromOffsets, e.g. wc/0 -> 10001
        fromOffsets += (tp -> partitionOffset.toLong)
      }
      // Key: the Kafka message key; value: e.g. "hello tom hello jerry"
      // This transforms every Kafka message into a (key, message) tuple
      val messageHandler = (mmd: MessageAndMetadata[String, String]) => (mmd.key(), mmd.message())

      // Create the direct DStream through KafkaUtils (fromOffsets makes consumption resume from the offsets computed above)
      // Type parameters: [key, value, key decoder, value decoder, result type]
      kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](ssc, kafkaParams, fromOffsets, messageHandler)
    } else {
      // If no offsets were saved, start from the latest (largest) or earliest (smallest) offset according to kafkaParams
      kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)
    }

    // Offset ranges read in the current batch
    var offsetRanges = Array[OffsetRange]()

    // If you apply a DStream transformation first, you can no longer get the offsets:
    //    val ds = kafkaStream.map(_._2).flatMap(_.split(" ")).map((_, 1))
    //    ds.foreachRDD(rdd => {
    //      // this RDD is no longer a KafkaRDD, so the offsets read from Kafka are not available
    //      offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
    //    })

    // With the direct approach, offsets can only be obtained from the RDDs of the Kafka DStream (KafkaRDD),
    // so no DStream transformation may be applied before reading them. Call foreachRDD on kafkaStream,
    // read the offsets from each KafkaRDD, and only then operate on the RDD.
    // To accumulate counts with the direct approach, accumulate in an external store
    // (a key-value in-memory NoSQL store such as Redis, or MySQL as done here).
    kafkaStream.foreachRDD { kafkaRDD =>
      // Only a KafkaRDD can be cast to HasOffsetRanges to obtain the offsets
      offsetRanges = kafkaRDD.asInstanceOf[HasOffsetRanges].offsetRanges
      // Extract the message values
      val lines: RDD[String] = kafkaRDD.map(_._2)

      // Operate on the RDD and trigger an action
      lines.foreachPartition(partition =>
        partition.foreach(x => {
          // Business logic: a simple word count per record
          println(x)
          val tuples: Array[(String, Int)] = x.split(" ").map((_, 1))
          val grouped: Map[String, Array[(String, Int)]] = tuples.groupBy(_._1)
          val wordAndCount: Map[String, Int] = grouped.mapValues(_.length)
          val sorted: List[(String, Int)] = wordAndCount.toList.sortBy(- _._2)

          // Write the counts to MySQL (note: a connection is opened per word, simple for a demo but wasteful; a real job would reuse one connection per partition)
          for (i <- 0 until sorted.size) {
            val pool = new MysqlConnectPool
            val conn: Connection = pool.conn()
            val stat: Statement = conn.createStatement()
            val field = sorted(i)._1
            val times = sorted(i)._2
            println("Updating MySQL...")
            val set: ResultSet = pool.selectByField(stat,field)
            if (set != null && set.next()) {
              println("Existing row found")
              val dbfield: String = set.getString("field")
              val dbtimes: Int = set.getInt("times")
              println(s"Values read from DB: dbfield=${dbfield}, dbtimes=${dbtimes}")
              pool.update(stat,field,times+dbtimes)
            } else {
              println("No existing row, inserting...")
              val result: Int = pool.insert(stat,field,times)
            }
            pool.close(conn,stat)
          }

        })
      )

      for (o <- offsetRanges) {
        // e.g. /g001/offsets/wc/0
        val zkPath = s"${topicDirs.consumerOffsetDir}/${o.partition}"
        // Save this partition's offset to ZooKeeper, e.g. /g001/offsets/wc/0/20000
        println(s"Saving path: ${zkPath}, offset: ${o.untilOffset.toString}")
        ZkUtils.updatePersistentPath(zkClient, zkPath, o.untilOffset.toString)
      }
    }

    ssc.start()
    ssc.awaitTermination()

  }


}
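
With both files in place, the job can be exercised by typing a few lines into Kafka's console producer (the script name and flags assume a Kafka 0.8/0.10 distribution; adjust paths to your installation):

bin/kafka-console-producer.sh --broker-list localhost:9092 --topic wc
hello tom hello jerry

Each word's count then accumulates in the spark_test table, and the consumed offsets are written back to ZooKeeper under /g001/offsets/wc/<partition>.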

3. pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.yangwj</groupId>
    <artifactId>spark</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <maven.compiler.source>1.8</maven.compiler.source>
        <maven.compiler.target>1.8</maven.compiler.target>
        <scala.version>2.11.8</scala.version>
        <spark.version>2.2.0</spark.version>
        <encoding>UTF-8</encoding>


        <java.version>1.8</java.version>
        <hadoop.version>2.7.7</hadoop.version>
        <hbase.version>2.0.5</hbase.version>
        <spring-data-hadoop.version>2.4.0</spring-data-hadoop.version>
    </properties>
    <dependencies>
        <!-- spark-streaming-kafka (0.8 direct API) -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming-kafka-0-8_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <!--        spark-streaming-->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <!-- Spark MLlib -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-mllib_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>

        <dependency>
            <groupId>org.codehaus.janino</groupId>
            <artifactId>janino</artifactId>
            <version>3.0.8</version>
        </dependency>
        <!-- Scala library -->
        <dependency>
            <groupId>org.scala-lang</groupId>
            <artifactId>scala-library</artifactId>
            <version>${scala.version}</version>
        </dependency>

        <!-- Spark core -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>

        <!-- Spark SQL -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/redis.clients/jedis -->
        <dependency>
            <groupId>redis.clients</groupId>
            <artifactId>jedis</artifactId>
            <version>2.9.0</version>
        </dependency>

        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.12</version>
        </dependency>

    </dependencies>
</project>
Original post: https://www.cnblogs.com/ywjfx/p/13453733.html