mapPartitions

The mapPartitions operation is similar to map, except that the function it applies receives an iterator over each partition of the RDD rather than each individual element. When the mapping needs to repeatedly create expensive objects, mapPartitions can be far more efficient than map. For example, to write all the data in an RDD to a database over JDBC, map would have to create a connection for every element, which is very costly; with mapPartitions, a single connection per partition suffices.
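A minimal sketch of that JDBC scenario (the connection URL, table name, and column are hypothetical placeholders; foreachPartition, the action counterpart of mapPartitions, is used here since the write is a pure side effect):

import java.sql.DriverManager
import org.apache.spark.rdd.RDD

def saveToDb(rdd: RDD[Int]): Unit =
  rdd.foreachPartition { iter =>
    // One connection per partition, rather than one per element as map would need.
    // URL, credentials, table, and column are placeholders.
    val conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/test", "user", "pass")
    val stmt = conn.prepareStatement("INSERT INTO nums (value) VALUES (?)")
    try {
      iter.foreach { n =>
        stmt.setInt(1, n)
        stmt.executeUpdate()
      }
    } finally {
      stmt.close()
      conn.close()
    }
  }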

In Scala, yield remembers the value produced by each iteration of a for comprehension and collects those values, one by one, into a new collection.

for {clauses} yield {expression}
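For example, this comprehension collects each doubled value into a new collection (a quick illustration, not from the original transcript):

for (i <- 1 to 3) yield i * 2   // Vector(2, 4, 6)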

Example: doubling every element within each partition. (Note that sumn below, despite its name, doubles each element via for/yield rather than summing.)

scala> val numrdd=sc.makeRDD(1 to 10,3)

numrdd: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[51] at makeRDD at <console>:25

scala> def sumn(iter:Iterator[Int])={val aa=for(i<-iter) yield i*2;aa.toIterator}

sumn: (iter: Iterator[Int])Iterator[Int]

scala> numrdd.mapPartitions(sumn).collect

res49: Array[Int] = Array(2, 4, 6, 8, 10, 12, 14, 16, 18, 20)
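For a purely element-wise transformation like this, plain map is equivalent:

numrdd.map(_ * 2).collect   // same result: Array(2, 4, ..., 20)

The advantage of mapPartitions only appears when there is per-partition setup work, as in the JDBC sketch above.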

-----------------------------------------------

Summing the values within each partition


scala> val numRDD=sc.makeRDD(1 to 10,3)
numRDD: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[210] at makeRDD at <console>:25

scala> numRDD.mapPartitions(x=>{val result=List(); var i=0;while(x.hasNext){i+=x.next()};result.::(i).toIterator}).collect
res136: Array[Int] = Array(6, 15, 34)

scala> numRDD.mapPartitions(x=>{
     |   val result=List()
     |   var i=0
     |   while(x.hasNext){
     |     i+=x.next()
     |   }
     |   result.::(i).toIterator
     | }).collect
res136: Array[Int] = Array(6, 15, 34)

-------------------------------------------------------------

scala> val numRDD=sc.makeRDD(1 to 10,3)

numRDD: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[4] at makeRDD at <console>:24

scala> def partionsum(iter:Iterator[Int])={var result=List[Int]();var i:Int= 0;while(iter.hasNext){var n:Int=iter.next; i += n;} ;result.::(i).toIterator}
partionsum: (iter: Iterator[Int])Iterator[Int]

scala> def partionsum(iter:Iterator[Int])={
     |   var result=List[Int]()
     |   var i:Int=0
     |   while(iter.hasNext){
     |     var n:Int=iter.next
     |     i += n
     |   }
     |   result.::(i).toIterator
     | }
partionsum: (iter: Iterator[Int])Iterator[Int]

scala> numRDD.mapPartitions(partionsum).collect

res7: Array[Int] = Array(6, 15, 34)
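Both variants above accumulate the total in a var and prepend it to an empty list; an equivalent, more idiomatic sketch simply wraps the iterator's sum:

numRDD.mapPartitions(iter => Iterator(iter.sum)).collect   // also Array(6, 15, 34)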

--------------------------------------

Summing the values within each partition and showing the partition index

scala> val numRDD=sc.makeRDD(1 to 10,3)

numRDD: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[4] at makeRDD at <console>:24

scala> numRDD.mapPartitionsWithIndex((x,iter)=>{val result=List(); var i=0;while(iter.hasNext){i+=iter.next()};result.::(x+"|"+i).toIterator}).collect
res138: Array[String] = Array(0|6, 1|15, 2|34)

scala> numRDD.mapPartitionsWithIndex((x,iter)=>{
     |   val result=List()
     |   var i=0
     |   while(iter.hasNext){
     |     i+=iter.next()
     |   }
     |   result.::(x+"|"+i).toIterator
     | }).collect
res138: Array[String] = Array(0|6, 1|15, 2|34)

------------------------------

scala> val numRDD=sc.makeRDD(1 to 10,3)

numRDD: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[4] at makeRDD at <console>:24

scala> def partionwithindexsum(x:Int,iter:Iterator[Int])={var result=List[Int]();var i:Int= 0;while(iter.hasNext){var n:Int=iter.next; i += n;} ;result.::(x+"|"+i).toIterator}
partionwithindexsum: (x: Int, iter: Iterator[Int])Iterator[Any]

scala> def partionwithindexsum(x:Int,iter:Iterator[Int])={
     |   var result=List[Int]()
     |   var i:Int=0
     |   while(iter.hasNext){
     |     var n:Int=iter.next
     |     i += n
     |   }
     |   result.::(x+"|"+i).toIterator
     | }
partionwithindexsum: (x: Int, iter: Iterator[Int])Iterator[Any]

scala> numRDD.mapPartitionsWithIndex(partionwithindexsum).collect

res9: Array[Any] = Array(0|6, 1|15, 2|34)
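The per-partition sum with its partition index can also be written more compactly; this sketch is equivalent to the code above:

numRDD.mapPartitionsWithIndex((x, iter) => Iterator(x + "|" + iter.sum)).collect   // also Array(0|6, 1|15, 2|34)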

----------------------

Counting the number of elements in each partition

scala> val numRDD=sc.makeRDD(1 to 10,3)

numRDD: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[4] at makeRDD at <console>:24

scala> def partionwithindexlength(x:Int,iter:Iterator[Int])={var result=List[Int]();var i:Int= iter.toList.length;result.::(x+"|"+i).toIterator}

partionwithindexlength: (x: Int, iter: Iterator[Int])Iterator[Any]

scala> def partionwithindexlength(x:Int,iter:Iterator[Int])={
     |   var result=List[Int]()
     |   var i:Int=iter.toList.length
     |   result.::(x+"|"+i).toIterator
     | }
partionwithindexlength: (x: Int, iter: Iterator[Int])Iterator[Any]

scala> numRDD.mapPartitionsWithIndex(partionwithindexlength).collect

res10: Array[Any] = Array(0|3, 1|3, 2|4)
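Note that iter.toList.length materializes the whole partition in memory; a lighter equivalent (a sketch, not from the original) counts directly on the iterator:

numRDD.mapPartitionsWithIndex((x, iter) => Iterator(x + "|" + iter.size)).collect   // also Array(0|3, 1|3, 2|4)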

Original post: https://www.cnblogs.com/playforever/p/9450531.html