In [1]:
import findspark
findspark.init('/Users/hanlei/Downloads/spark-2.2.0-bin-hadoop2.7')
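
findspark only adds a local Spark install to sys.path, and the path above is
machine-specific. A more portable sketch, assuming the SPARK_HOME environment
variable is set (the fallback path here is hypothetical):

In [ ]:
import os
import findspark
# Prefer SPARK_HOME if set; the fallback path is illustrative only
findspark.init(os.environ.get('SPARK_HOME', '/usr/local/spark'))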

In [2]:
from pyspark.sql import SparkSession

In [3]:
spark = SparkSession.builder.appName('Basics').getOrCreate()
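
getOrCreate() returns the already-running session if one exists. The builder
also accepts a master URL and arbitrary config settings; a minimal sketch
(the option values below are illustrative, not from the original session):

In [ ]:
spark = (SparkSession.builder
         .appName('Basics')
         .master('local[*]')                           # run locally on all cores
         .config('spark.sql.shuffle.partitions', '8')  # illustrative tuning value
         .getOrCreate())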

In [5]:
df = spark.read.json('/Users/hanlei/Downloads/spark-2.2.0-bin-hadoop2.7/python/test_support/sql/people.json')
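
The same reader interface handles other formats. A hedged sketch with
hypothetical file paths:

In [ ]:
# header/inferSchema are CSV reader options; both paths are hypothetical
df_csv = spark.read.csv('people.csv', header=True, inferSchema=True)
df_parquet = spark.read.parquet('people.parquet')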

In [6]:
df.show()


+----+-------+
| age|   name|
+----+-------+
|null|Michael|
|  30|   Andy|
|  19| Justin|
+----+-------+
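
show() prints the first 20 rows by default and truncates long cell values;
both behaviors are adjustable:

In [ ]:
df.show(n=2, truncate=False)   # first 2 rows, without column truncation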


In [7]:
df.printSchema()


root
 |-- age: long (nullable = true)
 |-- name: string (nullable = true)
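
age was inferred as long because JSON schema inference maps whole numbers to
LongType. df.dtypes exposes the same information as (name, type) pairs:

In [ ]:
df.dtypes   # [('age', 'bigint'), ('name', 'string')]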


In [8]:
df.columns


Out[8]:
['age', 'name']

In [9]:
df.describe()


Out[9]:
DataFrame[summary: string, age: string, name: string]

In [10]:
df.describe().show()


+-------+------------------+-------+
|summary|               age|   name|
+-------+------------------+-------+
|  count|                 2|      3|
|   mean|              24.5|   null|
| stddev|7.7781745930520225|   null|
|    min|                19|   Andy|
|    max|                30|Michael|
+-------+------------------+-------+
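
Note the count of 2 for age versus 3 for name: describe() skips nulls per
column. The raw stddev is hard to read; one common follow-up, sketched here
with format_number from pyspark.sql.functions:

In [ ]:
from pyspark.sql.functions import format_number

# describe() returns strings, so cast before formatting to 2 decimal places
summary = df.describe()
summary.select(summary['summary'],
               format_number(summary['age'].cast('float'), 2).alias('age')).show()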


In [11]:
from pyspark.sql.types import StructField, StringType, IntegerType, StructType

In [16]:
data_schema = [StructField('age', IntegerType(), True),
               StructField('name', StringType(), True)]

In [18]:
final_struc = StructType(fields=data_schema)

In [19]:
df = spark.read.json('/Users/hanlei/Downloads/spark-2.2.0-bin-hadoop2.7/python/test_support/sql/people.json', 
                     schema=final_struc)

In [20]:
df.printSchema()


root
 |-- age: integer (nullable = true)
 |-- name: string (nullable = true)
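
With the explicit schema, age is now integer instead of long. The schema is
also inspectable directly on the DataFrame:

In [ ]:
df.schema          # the full StructType
df.schema.fields   # list of StructField objects (only the last line echoes)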


In [21]:
df.show()


+----+-------+
| age|   name|
+----+-------+
|null|Michael|
|  30|   Andy|
|  19| Justin|
+----+-------+


In [22]:
df['age']


Out[22]:
Column<b'age'>

In [23]:
type(df['age'])


Out[23]:
pyspark.sql.column.Column
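
A Column is an expression, not data; it becomes useful inside DataFrame
operations. For example, with filter:

In [ ]:
df.filter(df['age'] > 20).show()   # keeps Andy only; null ages drop out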

In [24]:
df.select('age')


Out[24]:
DataFrame[age: int]

In [25]:
df.select('age').show()


+----+
| age|
+----+
|null|
|  30|
|  19|
+----+


In [26]:
type(df.select('age'))


Out[26]:
pyspark.sql.dataframe.DataFrame

In [27]:
df.head(2)


Out[27]:
[Row(age=None, name='Michael'), Row(age=30, name='Andy')]

In [28]:
df.head(2)[0]


Out[28]:
Row(age=None, name='Michael')

In [29]:
type(df.head(2)[0])


Out[29]:
pyspark.sql.types.Row
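
Row objects support access by name, by index-style lookup, and conversion to
a plain dict via asDict():

In [ ]:
row = df.head(2)[1]    # Row(age=30, name='Andy')
row['age'], row.name   # (30, 'Andy')
row.asDict()           # {'age': 30, 'name': 'Andy'}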

In [30]:
df.select(['age', 'name']).show()


+----+-------+
| age|   name|
+----+-------+
|null|Michael|
|  30|   Andy|
|  19| Justin|
+----+-------+


In [35]:
df.withColumn('double_age', df['age']*2).show()


+----+-------+----------+
| age|   name|double_age|
+----+-------+----------+
|null|Michael|      null|
|  30|   Andy|        60|
|  19| Justin|        38|
+----+-------+----------+


In [36]:
df.show()


+----+-------+
| age|   name|
+----+-------+
|null|Michael|
|  30|   Andy|
|  19| Justin|
+----+-------+
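
withColumn returns a new DataFrame rather than modifying df in place, which
is why the original columns are unchanged above. To keep the result, bind it
to a name:

In [ ]:
df_doubled = df.withColumn('double_age', df['age'] * 2)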


In [37]:
df.withColumnRenamed('age','my_new_age').show()


+----------+-------+
|my_new_age|   name|
+----------+-------+
|      null|Michael|
|        30|   Andy|
|        19| Justin|
+----------+-------+


In [38]:
df.createOrReplaceTempView('people')

In [39]:
results = spark.sql("SELECT * FROM people")

In [40]:
results.show()


+----+-------+
| age|   name|
+----+-------+
|null|Michael|
|  30|   Andy|
|  19| Justin|
+----+-------+


In [41]:
new_result = spark.sql("SELECT * FROM people WHERE age=30")

In [42]:
new_result.show()


+---+----+
|age|name|
+---+----+
| 30|Andy|
+---+----+
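
The same filter can be written without SQL through the DataFrame API; an
equivalent sketch:

In [ ]:
df.filter(df['age'] == 30).show()   # same result as the WHERE clause above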

