Spark Lab 5

2. Programmatically convert an RDD to a DataFrame
Use reflection to infer the schema of an RDD that contains objects of a specific type. This approach suits RDDs whose data structure is already known.
import org.apache.spark.sql.SparkSession

object RDDtoDF {
  // The case class must be defined outside main() so that Spark can
  // infer the schema from its fields via reflection.
  case class Employee(id: Long, name: String, age: Long)

  def main(args: Array[String]) {
    val spark = SparkSession.builder().appName("RDDtoDF").getOrCreate()
    import spark.implicits._

    // Read the text file (comma-separated lines: id,name,age), map each
    // line onto the Employee case class, and let toDF() infer the schema.
    val employeeDF = spark.sparkContext
      .textFile("file:///usr/local/spark/employee.txt")
      .map(_.split(","))
      .map(attributes => Employee(attributes(0).trim.toLong, attributes(1).trim, attributes(2).trim.toLong))
      .toDF()

    employeeDF.createOrReplaceTempView("employee")
    val results = spark.sql("select id, name, age from employee")
    results.map(t => "id:" + t(0) + ",name:" + t(1) + ",age:" + t(2)).show()
  }
}
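As a side note, the SQL query result can also be converted back into a typed Dataset with as[Employee] instead of indexing the Row fields by position; a minimal sketch, reusing the SparkSession, implicits, and Employee case class defined above:

// Dataset[Employee] lets fields be accessed by name rather than by index.
val typedResults = spark.sql("select id, name, age from employee").as[Employee]
typedResults.map(e => s"id:${e.id},name:${e.name},age:${e.age}").show()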

3. Programmatically read and write MySQL data with DataFrames
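This exercise builds a DataFrame from an RDD by specifying the schema programmatically (a StructType), appends the rows to a MySQL table over JDBC, and then reads the table back to aggregate on the age column.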

import java.util.Properties
import org.apache.spark.sql.types._
import org.apache.spark.sql.Row
import org.apache.spark.sql.SparkSession

object TestMySQL {
  def main(args: Array[String]) {
    val spark = SparkSession.builder().appName("TestMySQL").getOrCreate()

    // Two sample records, split into fields on whitespace.
    val employeeRDD = spark.sparkContext
      .parallelize(Array("3 Mary F 26", "4 Tom M 23"))
      .map(_.split(" "))

    // Programmatically define the schema matching the target table.
    val schema = StructType(List(
      StructField("id", IntegerType, true),
      StructField("name", StringType, true),
      StructField("gender", StringType, true),
      StructField("age", IntegerType, true)))

    val rowRDD = employeeRDD.map(p => Row(p(0).toInt, p(1).trim, p(2).trim, p(3).toInt))
    val employeeDF = spark.createDataFrame(rowRDD, schema)

    // Append the two rows to the sparktest.employee table.
    val prop = new Properties()
    prop.put("user", "root")
    prop.put("password", "hadoop")
    prop.put("driver", "com.mysql.jdbc.Driver")
    employeeDF.write.mode("append")
      .jdbc("jdbc:mysql://localhost:3306/sparktest", "sparktest.employee", prop)

    // Read the table back and compute the maximum and the sum of age.
    val jdbcDF = spark.read.format("jdbc")
      .option("url", "jdbc:mysql://localhost:3306/sparktest")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("dbtable", "employee")
      .option("user", "root")
      .option("password", "hadoop")
      .load()
    jdbcDF.agg("age" -> "max", "age" -> "sum").show()
  }
}
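Equivalently, the final aggregation can be written with the column-functions API, which produces the same max(age) and sum(age) columns; a minimal sketch, assuming the same jdbcDF as above:

// Aggregate over the whole DataFrame using column expressions.
import org.apache.spark.sql.functions.{max, sum}
jdbcDF.agg(max("age"), sum("age")).show()

Note that when the program is run with spark-submit, the MySQL JDBC connector jar must be available on the classpath (for example via the --jars option).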

 
