# PySpark study notes

from pyspark.sql import SparkSession

# Create (or reuse) the SparkSession — the entry point for the DataFrame / SQL API.
# BUG FIX: the original chained the builder calls across lines with no trailing
# backslashes, which is a SyntaxError. Enclosing the chain in parentheses is the
# idiomatic fix for multi-line method chains.
spark = (
    SparkSession
    .builder
    .appName("Python Spark SQL basic example")
    .config("spark.some.config.option", "some-value")
    .getOrCreate()
)
#创建一个DataFrame
# Build a small demo DataFrame: parallelize a list of row tuples into an RDD,
# then name the four columns via toDF.
demo_rows = [
    (1, 2, 3, 'a b c'),
    (4, 5, 6, 'd e f'),
    (7, 8, 9, 'g h i'),
]
df = spark.sparkContext.parallelize(demo_rows).toDF(['col1', 'col2', 'col3', 'col4'])
# Demo employee table built directly from tuples plus a column-name list.
# NOTE(review): the column name 'Sallary' is misspelled, but it is kept
# byte-for-byte because later code accesses Employee.Sallary by that name.
employee_rows = [
    ('1', 'Joe',   '70000', '1'),
    ('2', 'Henry', '80000', '2'),
    ('3', 'Sam',   '60000', '2'),
    ('4', 'Max',   '90000', '1'),
]
employee_columns = ['Id', 'Name', 'Sallary', 'DepartmentId']
Employee = spark.createDataFrame(employee_rows, employee_columns)
#查看数据类型
# Inspect the DataFrame's type information.
print(Employee)          # concise repr, e.g. DataFrame[Id: string, ...]
# BUG FIX: printSchema() prints the tree itself and returns None, so the
# original print(Employee.printSchema()) emitted a spurious "None" line.
Employee.printSchema()
print(Employee.dtypes)   # list of (column_name, type_string) pairs
#更改数据类型
# Replace the string-typed 'Sallary' column with an int-typed cast of itself.
Employee = Employee.withColumn('Sallary', Employee['Sallary'].cast('int'))

  

# Original source: https://www.cnblogs.com/mahailuo/p/11002148.html