In [1]:
    
# Configure Spark via spylon: point at a local Spark 1.6.2 distribution
# and run with a local master using 4 worker threads ("local[4]").
import spylon
import spylon.spark as sp
c = sp.SparkConfiguration()
# NOTE(review): `_spark_home` is a private attribute -- presumably the
# supported route is the SPARK_HOME environment variable; confirm against
# the spylon API before relying on this.
c._spark_home = "/path/to/spark-1.6.2-bin-hadoop2.6"
c.master = ["local[4]"]
    
In [2]:
    
# Create the SparkContext and SQLContext for this application name.
sc, sqlContext = c.sql_context("MyApplicationName")
    
In [4]:
    
# SparkJVMHelpers wraps the SparkContext's Py4J gateway; later cells use it
# to instantiate JVM objects (`helpers.jvm...`) and to convert Python
# collections into Scala ones (`to_scala_seq`, `to_scala_map`).
from spylon.spark.spark import SparkJVMHelpers
helpers = SparkJVMHelpers(sc)
    
In [6]:
    
# Instantiate a java.util.Random on the JVM side through the Py4J gateway.
rand = helpers.jvm.java.util.Random()
    
In [8]:
    
# Bare expression: display the repr of the proxied JVM object.
rand
    
    Out[8]:
In [11]:
    
# Show the Py4J-generated docstring for the proxied Java object.
# The function-call form works on both Python 2 and Python 3; the original
# `print x` statement form is a SyntaxError on Python 3.
print(rand.__doc__)
    
    
Call a method on that Java object.
In [10]:
    
# Call a method on the JVM object: java.util.Random.nextInt(bound) returns
# a pseudo-random int in [0, 10000).
rand.nextInt(10000)
    
    Out[10]:
In [13]:
    
# Convert a Python list into a Scala Seq living on the JVM.
python_list = [1, 2, 3, 4]
o = helpers.to_scala_seq(python_list)
    
In [14]:
    
# Bare expression: display the Py4J proxy for the Scala Seq.
o
    
    Out[14]:
In [7]:
    
# Inspect the concrete runtime class of the converted object on the JVM side.
o.getClass().toString()
    
    Out[7]:
In [8]:
    
# Scala's toString rendering of the Seq.
o.toString()
    
    Out[8]:
In [9]:
    
# Convert the Seq to a Scala List on the JVM, then render it as a string.
o.toList().toString()
    
    Out[9]:
In [10]:
    
# Convert a Python dict into a Scala Map living on the JVM.
python_dict = {'a': 1, 'b': 2}
m = helpers.to_scala_map(python_dict)
    
In [11]:
    
# Scala's toString rendering of the Map.
m.toString()
    
    Out[11]:
In [12]:
    
# NOTE(review): this rebinds `c`, clobbering the SparkConfiguration created
# at the top of the notebook -- a distinct name (e.g. `map_class`) would
# avoid hidden-state surprises on re-run. Left as-is in case later cells
# reference `c`.
c = m.getClass()
    
In [13]:
    
# Fully-qualified (canonical) name of the Scala class backing the Map.
c.getCanonicalName()
    
    Out[13]: