In [1]:
library(SparkR)


Attaching package: ‘SparkR’

The following objects are masked from ‘package:stats’:

    cov, filter, lag, na.omit, predict, sd, var

The following objects are masked from ‘package:base’:

    colnames, colnames<-, endsWith, intersect, rank, rbind, sample,
    startsWith, subset, summary, table, transform


In [2]:
# Start a local SparkR session using all available cores, pulling in the
# spark-csv package (Spark 1.x API: sparkR.init + sparkRSQL.init; both are
# deprecated in Spark 2.x in favor of sparkR.session).
sc <- sparkR.init("local[*]", "SparkR", sparkPackages="com.databricks:spark-csv_2.10:1.5.0")
# SQL context needed for read.df() and sql() calls below.
sqlContext <- sparkRSQL.init(sc)


Launching java with spark-submit command /opt/Simba/bin/spark-submit  --packages com.databricks:spark-csv_2.10:1.5.0 sparkr-shell /tmp/RtmpGSOMSq/backend_port733429902b30 

In [3]:
schema <- structType(structField("tag", "string"), structField("x", "double"), structField("y", "double"))

In [4]:
# Read the CSV file into a Spark DataFrame using the spark-csv data source,
# applying the explicit schema defined above.
# NOTE(review): hardcoded absolute path /opt/points.txt — confirm it exists on
# every node this notebook runs on.
points <- read.df(sqlContext, "/opt/points.txt", source = "com.databricks.spark.csv", schema = schema)
# collect() pulls the full distributed DataFrame to the driver — fine here
# because the dataset is tiny (7 rows).
print(collect(points))


  tag x y
1   A 1 1
2   B 2 2
3   C 3 3
4   D 4 4
5   E 5 5
6   F 6 6
7   G 7 7

In [5]:
# Register the DataFrame as a temp table so it can be referenced from SQL.
registerTempTable(points, "points")
# Fix: the query string was previously bound to a variable named `sql`, which
# shadows SparkR's sql() function in the very call that uses it. R happens to
# resolve the call anyway (non-function bindings are skipped during function
# lookup), but the shadowing is confusing and fragile — rename to `query`.
# POINT/CIRCLERANGE are Simba spatial-SQL extensions: select rows whose (x, y)
# point lies within distance 2 of (4.5, 4.5).
query <- "SELECT * FROM points WHERE POINT(x, y) IN CIRCLERANGE(POINT(4.5, 4.5), 2)"
print(collect(sql(sqlContext, query)))


  tag x y
1   D 4 4
2   E 5 5

In [ ]: