In [1]:
// Set up the Spark configuration, SparkContext, and SQLContext.
// (Assumes the notebook kernel preloads the Spark classes used in this example.)
var sparkConf = new SparkConf().setAppName("JavaScriptSparkSQL").setMaster("local[*]");
var ctx = new SparkContext(sparkConf);
var sqlContext = new SQLContext(ctx);
In [2]:
// Load a text file and convert each line to a JavaScript object.
var people = ctx.textFile("../data/people.txt").map(function(line) {
    var parts = line.split(",");
    return {
        name: parts[0],
        age: parseInt(parts[1].trim())
    };
});
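In [ ]:
// Optional sanity check (illustrative sketch): peek at the first few parsed
// records. This assumes ../data/people.txt follows the usual Spark sample
// format, one "name, age" pair per line (e.g. "Michael, 29").
JSON.stringify(people.take(3))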
In [3]:
// Generate the schema: a nullable string column "name" and a nullable
// integer column "age".
var fields = [];
fields.push(DataTypes.createStructField("name", DataTypes.StringType, true));
fields.push(DataTypes.createStructField("age", DataTypes.IntegerType, true));
var schema = DataTypes.createStructType(fields);
Out[3]:
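In [ ]:
// Equivalent, data-driven construction (illustrative sketch): build the same
// schema from a list of [name, type] pairs, which scales better as the number
// of columns grows.
var columnSpec = [["name", DataTypes.StringType], ["age", DataTypes.IntegerType]];
var schema2 = DataTypes.createStructType(columnSpec.map(function(spec) {
    return DataTypes.createStructField(spec[0], spec[1], true);
}));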
In [4]:
// Convert records of the RDD (people) to Rows.
var rowRDD = people.map(function(person) {
    return RowFactory.create([person.name, person.age]);
});
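In [ ]:
// Illustrative check: Rows are positional, so fields are read back by ordinal.
// (Assumes the wrapped Row exposes Spark's getString/getInt accessors, as
// used later in this notebook.)
var firstRow = rowRDD.take(1)[0];
"name=" + firstRow.getString(0) + ", age=" + firstRow.getInt(1)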
In [5]:
// Apply the schema to the RDD of Rows to get a DataFrame.
var peopleDataFrame = sqlContext.createDataFrame(rowRDD, schema);
// Register the DataFrame as a temporary table so it can be queried with SQL.
peopleDataFrame.registerTempTable("people");
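In [ ]:
// Illustrative follow-on query: any SQL supported by the Spark version in use
// can run against the registered table, e.g. filtering on the age column.
var teenagers = sqlContext.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19");
teenagers.count()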
In [10]:
// SQL can be run over RDDs that have been registered as tables.
var results = sqlContext.sql("SELECT name FROM people");
// The results of SQL queries are DataFrames and support all the normal RDD
// operations. The columns of a row in the result can be accessed by ordinal.
var names = results.toRDD().map(function(row) {
    return "Name: " + row.getString(0);
});
JSON.stringify(names.take(10))
Out[10]:
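In [ ]:
// Illustrative alternative: the same projection via the DataFrame API instead
// of SQL (assumes the DataFrame wrapper exposes select/col as in Spark).
var namesDF = peopleDataFrame.select(peopleDataFrame.col("name"));
JSON.stringify(namesDF.toRDD().map(function(row) {
    return "Name: " + row.getString(0);
}).take(10))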