Advanced Scala/Spark practice exercises

1. Find which products each company has

val arr3 = List("Apache" -> "Spark", "Apache" -> "Kafka", "Oracle" -> "JAVA", "Oracle" -> "DB ORACLE", "Oracle" -> "Mysql")
val rdd2 = sc.makeRDD(arr3).aggregateByKey(List[String]())((strings: List[String], str: String) => str :: strings, (strings: List[String], strings0: List[String]) => strings ::: strings0)
val rdd3 = sc.makeRDD(arr3).groupByKey()
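
For reference, both RDDs produce the same company-to-products grouping when collected; the exact element order shown below is an assumption, since it depends on partitioning:

println(rdd2.collect().toBuffer)
// e.g. ArrayBuffer((Oracle,List(Mysql, DB ORACLE, JAVA)), (Apache,List(Kafka, Spark)))
println(rdd3.collect().toBuffer)
// e.g. ArrayBuffer((Oracle,CompactBuffer(JAVA, DB ORACLE, Mysql)), (Apache,CompactBuffer(Spark, Kafka)))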

2. Verifying the par method

(0 to 5).par.collect{case _ => Thread.currentThread.getName}.distinct.foreach(println)
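
The parallel collection hands elements to different worker threads, so several distinct thread names are usually printed. As a minimal comparison sketch (not in the original post), the sequential version runs everything on the calling thread:

(0 to 5).collect { case _ => Thread.currentThread.getName }.distinct.foreach(println)
// prints a single thread name (typically "main"), whereas the .par version usually prints several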

package spark01

import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable
import scala.collection.mutable.ListBuffer

/**
  * Use an RDD to process an irregular list
  *
  * Created by lq on 2017/8/11.
  */
object MkRdd {
  def test4(): Unit = {
    val conf = new SparkConf().setAppName("WC").setMaster("local[2]")
    val sc = new SparkContext(conf)
    // Irregular list: two plain (company, product) pairs plus one Tuple3 that itself holds three pairs
    val arr = List(("Apache" -> "Spark"), ("Apache" -> "Kafka"), ("Oracle" -> "JAVA", "Oracle" -> "DB ORACLE", "Oracle" -> "Mysql"))
    // flatMap takes one element at a time and returns a collection (which can be a newly built one)
    val rdd = sc.makeRDD(arr).flatMap(t => {
      t match {
        case s: Tuple2[String, String] => List(s) // the type arguments are erased; only the Tuple2 shape is checked
        case _ => t.productIterator.toList // unpack the Tuple3 into its three pairs
      }
    }).map(t => t.asInstanceOf[Tuple2[String, String]]).groupByKey()
    //.aggregate(ListBuffer[String]())((x:ListBuffer[String],y)=>{x.+(y.asInstanceOf[String]);x},(m,n)=>{m++n})
    println(rdd.collect.toBuffer)
  }
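
  // (Added note, not in the original post) Collecting the grouped RDD in test4 should show the same
  // grouping as exercise 1, e.g. (the order is partition-dependent, so treat it only as an illustration):
  //   ArrayBuffer((Oracle,CompactBuffer(JAVA, DB ORACLE, Mysql)), (Apache,CompactBuffer(Spark, Kafka)))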

  /**
    * An aggregateByKey example
    */
  def test5(): Unit = {
    val conf = new SparkConf().setAppName("WC").setMaster("local[2]")
    val sc = new SparkContext(conf)

    val arr2 = List(("Apache" -> "Spark"), ("Apache" -> "Kafka"), ("Oracle" -> "JAVA"), ("Oracle" -> "DB ORACLE"), ("Oracle" -> "Mysql"))
    val arr3 = List("Apache" -> "Spark", "Apache" -> "Kafka", "Oracle" -> "JAVA", "Oracle" -> "DB ORACLE", "Oracle" -> "Mysql")
    val rdd2 = sc.makeRDD(arr3).aggregateByKey(List[String]())((strings:List[String],str:String)=>str::strings,(strings:List[String],strings0:List[String])=>strings ::: strings0)
    val rdd3 = sc.makeRDD(arr3).groupByKey()
    println(rdd2.collect().toBuffer)
    //println(rdd3.collect().toBuffer)

  }
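
  /**
    * (Added sketch, not in the original post) The same grouping with mapValues + reduceByKey; the
    * method name test5b is made up for illustration. reduceByKey cannot change the value type, so
    * every value is wrapped in a one-element List up front.
    */
  def test5b(): Unit = {
    val conf = new SparkConf().setAppName("WC").setMaster("local[2]")
    val sc = new SparkContext(conf)
    val arr3 = List("Apache" -> "Spark", "Apache" -> "Kafka", "Oracle" -> "JAVA", "Oracle" -> "DB ORACLE", "Oracle" -> "Mysql")
    val rdd = sc.makeRDD(arr3).mapValues(List(_)).reduceByKey(_ ::: _)
    println(rdd.collect().toBuffer)
  }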

  /**
    * An aggregate example: compute the average of a list
    */
  def test6(): Unit = {
    val arr = List(1, 2, 3, 4, 5, 6, 7, 8, 9)
    // zero value (sum, count); the seq op adds the element to the sum and bumps the count, the comb op merges partial results
    val res = arr.aggregate((0, 0))((acc, number) => (acc._1 + number, acc._2 + 1), (par1, par2) => (par1._1 + par2._1, par1._2 + par2._2))
    // parallel version
    val res1 = arr.par.aggregate((0, 0))((acc, number) => (acc._1 + number, acc._2 + 1), (par1, par2) => (par1._1 + par2._1, par1._2 + par2._2))
    val res2 = res._1 / res._2
    println(res2)
  }
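
  /**
    * (Added sketch, not in the original post) The same sum/count average expressed on an RDD with
    * RDD.aggregate; the method name test6b and the passed-in SparkContext are assumptions made for
    * illustration.
    */
  def test6b(sc: SparkContext): Unit = {
    val rdd = sc.makeRDD(List(1, 2, 3, 4, 5, 6, 7, 8, 9))
    // zero value (sum, count); seqOp folds elements within a partition; combOp merges the per-partition results
    val (sum, count) = rdd.aggregate((0, 0))(
      (acc, number) => (acc._1 + number, acc._2 + 1),
      (p1, p2) => (p1._1 + p2._1, p1._2 + p2._2))
    println(sum.toDouble / count) // expected 5.0
  }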
  // Compute each student's average score (requirement: do not simply divide by 3)
  def test77(): Unit = {
    val conf = new SparkConf().setAppName("WC").setMaster("local[2]")
    val sc = new SparkContext(conf)
    // Input format:
    //   Lily,math,98
    //   Lily,english,98
    // Difference between aggregateByKey and reduceByKey: aggregateByKey can change the type of the
    // returned value, while reduceByKey cannot
    val rdd1 = sc.textFile("D:\\___WORK\\workSpaceHome\\temp\\study3\\mySpark\\score.dat")
      .map(_.split(","))
      .map(t => (t(0), (t(1), t(2).toInt))) // (Lily,(math,98))
      .aggregateByKey((mutable.Set[String](), 0))(
        (m, n) => {
          (m._1 + n._1, m._2 + n._2)
        }, // per-partition step: m is the accumulator (starting from the zero value), n is the current value
        (x, y) => { // x and y are the partial results from different partitions
          ((x._1 | y._1), (x._2 + y._2))
        } // cross-partition merge
      ).map(t => (t._1, t._2._2 / t._2._1.size)) // t._1 is the key; t._2 is the aggregated value with the shape of the zero value (mutable.Set[String](), 0)
    //.groupByKey().map(t=>{(t._1._1,t._1._2/t._2.size)})
    println(rdd1.collect().toBuffer)
  }
}
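
The Set in the zero value exists only to count how many subjects each student actually has. A simpler variant (a sketch added here, not from the original post) keeps a plain (sum, count) pair per student and assumes the same score.dat layout:

val avg = sc.textFile("D:\\___WORK\\workSpaceHome\\temp\\study3\\mySpark\\score.dat")
  .map(_.split(","))
  .map(t => (t(0), t(2).toInt)) // (Lily, 98)
  .aggregateByKey((0, 0))(
    (acc, score) => (acc._1 + score, acc._2 + 1), // per-partition: accumulate sum and record count
    (a, b) => (a._1 + b._1, a._2 + b._2)) // merge partition results
  .mapValues(t => t._1.toDouble / t._2)
println(avg.collect().toBuffer)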
A standalone aggregateByKey example, tracing how the zero value participates in each partition:

object AggregateByKeyOp {
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setAppName("AggregateByKey").setMaster("local")
    val sc: SparkContext = new SparkContext(sparkConf)
    val data = List((1, 3), (1, 2), (1, 4), (2, 3))
    val rdd = sc.parallelize(data, 2)

    // Merge values across partitions; both a and b have the type of zeroValue
    def combOp(a: String, b: String): String = {
      println("combOp: " + a + "\t" + b)
      a + b
    }
    // Merge values within one partition; a has the type of zeroValue, b has the original value type
    def seqOp(a: String, b: Int): String = {
      println("SeqOp:" + a + "\t" + b)
      a + b
    }

    rdd.foreach(println)
    // zeroValue: the neutral value; it determines the type of the returned value and takes part in the computation
    // seqOp: merges values within the same partition
    // combOp: merges values across partitions
    val aggregateByKeyRDD = rdd.aggregateByKey("100")(seqOp, combOp)
    println(aggregateByKeyRDD.collect().toBuffer)
    // ArrayBuffer((2,1003), (1,100321004)) -- parallelize produced two partitions, and the zero value
    // is applied once per partition when the method runs on each of them
    // Inspect the partitioning to explain this result
    sc.stop()
  }
}
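
To see where (1,"100321004") comes from (assuming, as is typical for parallelize on a 4-element list, that partition 0 holds (1,3),(1,2) and partition 1 holds (1,4),(2,3)): within partition 0, seqOp builds "100" + 3 = "1003" and then "1003" + 2 = "10032" for key 1; within partition 1 it builds "1004" for key 1 and "1003" for key 2. combOp then concatenates the two partial strings for key 1 into "10032" + "1004" = "100321004", while key 2 appears in only one partition and keeps "1003".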

Another usage scenario for case: pattern-matching anonymous functions


// This style makes the variables clearer when using a partial function
def ip2Long(ipAddress: String): Long = {
  ipAddress.split("\\.").zipWithIndex.foldLeft(0L) {
    case (result, (ip, index)) => {
      result + (ip.toLong << ((3 - index) * 8))
    }
  }
}
// The original version
def ip2Long2(ipAddress: String): Long = {
  ipAddress.split("\\.").zipWithIndex.foldLeft(0L)((result, t) => {
    result + (t._1.toLong << ((3 - t._2) * 8))
  })
}
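
A quick sanity check, added here for illustration: each octet is shifted into place, so 192.168.1.1 becomes (192 << 24) + (168 << 16) + (1 << 8) + 1:

println(ip2Long("192.168.1.1")) // 3232235777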

Original post: https://www.cnblogs.com/rocky-AGE-24/p/7350795.html