2. The definition of case class Person(name: String, age: Int) must be placed outside the method's scope (i.e., where a member variable would go in Java).
In practice, applying the second point alone is enough to fix the error, as shown below:
import org.apache.spark.{SparkContext, SparkConf}

/**
 * Created by ESRI on 2017/6/14.
 */
object sqltest2 {

  // Define the schema using a case class, at the object level rather than inside main().
  // Note: Case classes in Scala 2.10 can support only up to 22 fields. To work around this limit,
  // you can use custom classes that implement the Product interface.
  case class Person(name: String, age: Int)

  def main(args: Array[String]) {
    println("I Love You Scala")
    System.setProperty("hadoop.home.dir", "E:\\bigdataTools\\hadoop\\hadoop-2.6.0\\hadoop-2.6.0")

    val conf = new SparkConf().setMaster("local").setAppName("wordCount")
    val sc = new SparkContext(conf)
    val sqlContext = new org.apache.spark.sql.SQLContext(sc)
    import sqlContext.implicits._

    // Create an RDD of Person objects and register it as a table.
    val people = sc.textFile("E:\\testData\\spark\\spark1.6\\people.txt")
      .map(_.split(","))
      .map(p => Person(p(0).trim, p(1).trim.toInt))
      .toDF()
    people.registerTempTable("people")

    // SQL statements can be run by using the sql methods provided by sqlContext.
    val teenagers = sqlContext.sql("SELECT name, age FROM people WHERE age >= 13 AND age <= 19")

    // The results of SQL queries are DataFrames and support all the normal RDD operations.
    // The columns of a row in the result can be accessed by field index:
    teenagers.map(t => "Name: " + t(0)).collect().foreach(println)

    // or by field name:
    teenagers.map(t => "Name: " + t.getAs[String]("name")).collect().foreach(println)

    // row.getValuesMap[T] retrieves multiple columns at once into a Map[String, T]
    //teenagers.map(_.getValuesMap[Any](List("name", "age"))).collect().foreach(println)
  }
}
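For contrast, here is a minimal sketch of the placement that point 2 warns against (the object name and file path here are hypothetical, not from the original code). With the case class declared inside main(), toDF() needs an implicit TypeTag for a method-local class, which the compiler cannot supply, so the conversion to a DataFrame fails to compile:

import org.apache.spark.{SparkContext, SparkConf}

object sqltestBroken {
  def main(args: Array[String]) {
    val conf = new SparkConf().setMaster("local").setAppName("wordCount")
    val sc = new SparkContext(conf)
    val sqlContext = new org.apache.spark.sql.SQLContext(sc)
    import sqlContext.implicits._

    // Wrong placement: Person is now local to main(), not a member of the object.
    case class Person(name: String, age: Int)

    // toDF() requires an implicit TypeTag for Person; for a method-local case
    // class the compiler cannot provide one, so this line does not compile.
    val people = sc.textFile("people.txt")
      .map(_.split(","))
      .map(p => Person(p(0).trim, p(1).trim.toInt))
      .toDF()
  }
}

Moving the case class up to the object body, as in the working version above, is all that changes between the two sketches.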