In [2]:
# Word count over "deneme.txt" using Spark RDDs.
# NOTE(review): `sc` (SparkContext) is assumed to be provided by the
# notebook/Spark shell environment — it is not created in this file.
import pyspark
import pyspark.sql

text_file = sc.textFile("deneme.txt")
counts = (
    text_file
    # str.split() with no argument splits on any whitespace run and drops
    # empty strings. The original line.split(" ") counted "" as a word
    # whenever a line had leading/trailing or consecutive spaces.
    .flatMap(lambda line: line.split())
    .map(lambda word: (word, 1))
    .reduceByKey(lambda a, b: a + b)
)
# NOTE(review): saveAsTextFile raises if the output path already exists,
# so re-running this cell fails unless "url5.txt" is removed first.
# (Despite the name, Spark writes a directory of part-files, not a file.)
counts.saveAsTextFile("url5.txt")
print('Islem Tamamlandi...')  # Turkish: "Operation completed"
In [23]:
In [8]:
# Fetch the SparkContext's status tracker (exposes active job/stage info).
# NOTE(review): relies on `sc` from the notebook environment; the bare
# expression lets Jupyter display the tracker object's repr as Out[].
sc.statusTracker()
Out[8]:
In [ ]:
In [ ]:
In [ ]: