In [1]:
# Word extraction: read the whole corpus into memory once.
# Use a context manager so the file handle is always closed,
# even if read() raises (original leaked the open handle).
with open('terminal.txt', 'r') as file:
    book = file.read()


def tokenize(text=None):
    """Lowercase a text and split it on whitespace.

    Parameters
    ----------
    text : str, optional
        Text to tokenize.  Defaults to the module-level ``book``
        (the contents of terminal.txt), preserving the original
        zero-argument call style.

    Returns
    -------
    list[str] or None
        Lowercased whitespace-separated tokens, or None when the
        source text is None.
    """
    if text is None:
        # Backward-compatible fallback to the global corpus.
        text = book
    if text is None:
        return None
    return text.lower().split()
        

def map_book(tokens):
    """Build a word -> frequency dictionary from a token list.

    Commas and periods are stripped from each token before counting,
    matching the original's limited punctuation handling.

    Parameters
    ----------
    tokens : iterable of str or None

    Returns
    -------
    dict[str, int] or None
        Frequency map, or None when ``tokens`` is None.
    """
    if tokens is None:
        return None

    hash_map = {}
    for element in tokens:
        # Remove the punctuation the original handled: ',' and '.'
        word = element.replace(",", "").replace(".", "")
        # dict.get collapses the exists/new-key branches into one line.
        hash_map[word] = hash_map.get(word, 0) + 1
    return hash_map


# Tokenize the book (reads the module-level `book` text)
words = tokenize()
word_list = ['the', 'life', 'situations', 'since', 'day', 'hdfs', 'hadoop']

# Build the frequency dictionary.  Named `word_counts` instead of `map`
# so the builtin map() is not shadowed for the rest of the session.
word_counts = map_book(words)

# Show word information; .get(word, 0) avoids a KeyError for words
# absent from the corpus.
for word in word_list:
    print('Word: [' + word + '] Frequency: ' + str(word_counts.get(word, 0)))


Word: [the] Frequency: 9
Word: [life] Frequency: 1
Word: [situations] Frequency: 1
Word: [since] Frequency: 1
Word: [day] Frequency: 4
Word: [hdfs] Frequency: 11
Word: [hadoop] Frequency: 8

In [ ]:


In [ ]:


In [2]:
import threading
from time import sleep
# Shared state for the job-cancellation demo below:
# `result` records the job outcome; `lock` is used as a done-signal
# (acquired by the main thread, released by the worker when finished).
result = "Not Set"
lock = threading.Lock()
def map_func(x):
    """Simulate a long-running Spark task.

    Sleeps far longer than the job is allowed to live; reaching the
    raise therefore means cancellation did NOT happen in time.
    """
    sleep(100)
    raise Exception("Task should have been cancelled")
def start_job(x):
    """Run a Spark job in a background thread and record its outcome.

    NOTE(review): assumes `sc` is a live SparkContext defined elsewhere
    (e.g. by a pyspark shell) — confirm before running.
    """
    global result
    try:
        # Tag the job so stop_job() can cancel it by group name.
        sc.setJobGroup("job_to_cancel", "some description")
        result = sc.parallelize(range(x)).map(map_func).collect()
    except Exception as e:
        # Cancellation surfaces here as an exception from collect().
        result = "Cancelled"
    lock.release()  # signal the waiting main thread that we are done
def stop_job():
    """Wait briefly, then cancel all jobs tagged "job_to_cancel".

    NOTE(review): relies on the same external `sc` as start_job().
    """
    sleep(5)
    sc.cancelJobGroup("job_to_cancel")
supress = lock.acquire()  # take the lock so the second acquire() below blocks
supress = threading.Thread(target=start_job, args=(10,)).start()
supress = threading.Thread(target=stop_job).start()
supress = lock.acquire()  # blocks here until start_job() calls lock.release()
print(result)
# NOTE(review): seqOp/combOp look like leftover helpers for RDD.aggregate();
# they are defined and printed but never applied to any RDD in this cell.
seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
print()
print(combOp)


Cancelled

<function <lambda> at 0x107b52e18>

In [3]:
import re, string

# NOTE(review): assumes `sc` is a live SparkContext (e.g. from a pyspark
# shell) — confirm before running.
text_file = sc.textFile('terminal.txt')
text_file.take(123)  # returns the first n lines of the RDD as a list


Out[3]:
['',
 '[cloudera@quickstart ~]$ hdfs dfs -mkdir /usr/cloudera',
 "mkdir: `/usr/cloudera': No such file or directory",
 '[cloudera@quickstart ~]$ hdfs dfs -mkdir /user/cloudera/WordCount',
 '[cloudera@quickstart ~]$ hdfs dfs -mkdir /user/cloudera/WordCount/input',
 '[cloudera@quickstart ~]$ hdfs dfs -put /home/cloudera/Desktop/input.txt user/cloudera/WordCount/input',
 "put: `user/cloudera/WordCouhdfs nt/input': No such file or directory",
 '[cloudera@quickstart ~]$ hdfs dfs -put /home/cloudera/Desktop/input.txt /user/cloudera/WordCount/input',
 '[cloudera@quickstart ~]$ hadoop dfs -jar /home/cloudera/D',
 'Desktop/   Documents/ Downloads/ ',
 '[cloudera@quickstart ~]$ ',
 '[cloudera@quickstart ~]$ hadoop dfs -jar /home/cloudera/Desktop/wordcount.jar wordcount wordcoun^C/user/cloudera/WordCount/input /user/cloudera/WordCount/output ',
 '[cloudera@quickstart ~]$ ^C',
 '[cloudera@quickstart ~]$ hdfs dfs -put /home/cloudera/Desktop/input.txt /user/cloudera/WordCount/input',
 "put: `/user/cloudera/WordCount/input/input.txt': File exists",
 '[cloudera@quickstart ~]$ hadoop dfs -jar /home/cloudera/Desktop/Untitled.jar wordcount.wordcount /user/cloudera/Desktop/input /user/cloudera/Desktop/output',
 'DEPRECATED: Use of this script to execute hdfs command is deprecated.',
 'Instead use the hdfs command for it.',
 '',
 '-jar: Unknown command',
 '[cloudera@quickstart ~]$ hadoop dfs jar /home/cloudera/Desktop/Untitled.jar wordcount.wordcount /user/cloudera/Desktop/input /user/cloudera/Desktop/output',
 'DEPRECATED: Use of this script to execute hdfs command is deprecated.',
 'Instead use the hdfs command for it.',
 '',
 'jar: Unknown command',
 '[cloudera@quickstart ~]$ hadoop jar /home/cloudera/Desktop/Untitled.jar wordcount.wordcount /user/cloudera/Desktop/input /user/cloudera/Desktop/output',
 '16/08/24 04:15:05 INFO client.RMProxy: Connecting to ResourceManager at /0.0.0.0:8032',
 '16/08/24 04:15:06 WARN mapreduce.JobResourceUploader: Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this.',
 '16/08/24 04:15:06 INFO mapreduce.JobSubmitter: Cleaning up the staging area /tmp/hadoop-yarn/staging/cloudera/.staging/job_1471515428094_0001',
 '16/08/24 04:15:06 WARN security.UserGroupInformation: PriviledgedActionException as:cloudera (auth:SIMPLE) cause:org.apache.hadoop.mapreduce.lib.input.InvalidInputException: Input path does not exist: hdfs://quickstart.cloudera:8020/user/cloudera/Desktop/input',
 'Exception in thread "main" org.apache.hadoop.mapreduce.lib.input.InvalidInputException: Input path does not exist: hdfs://quickstart.cloudera:8020/user/cloudera/Desktop/input',
 '\tat org.apache.hadoop.mapreduce.lib.input.FileInputFormat.singleThreadedListStatus(FileInputFormat.java:323)',
 '\tat org.apache.hadoop.mapreduce.lib.input.FileInputFormat.listStatus(FileInputFormat.java:265)',
 '\tat org.apache.hadoop.mapreduce.lib.input.FileInputFormat.getSplits(FileInputFormat.java:387)',
 '\tat org.apache.hadoop.mapreduce.JobSubmitter.writeNewSplits(JobSubmitter.java:305)',
 '\tat org.apache.hadoop.mapreduce.JobSubmitter.writeSplits(JobSubmitter.java:322)',
 '\tat org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:200)',
 '\tat org.apache.hadoop.mapreduce.Job$10.run(Job.java:1307)',
 '\tat org.apache.hadoop.mapreduce.Job$10.run(Job.java:1304)',
 '\tat java.security.AccessController.doPrivileged(Native Method)',
 '\tat javax.security.auth.Subject.doAs(Subject.java:415)',
 '\tat org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1693)',
 '\tat org.apache.hadoop.mapreduce.Job.submit(Job.java:1304)',
 '\tat org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1325)',
 '\tat wordcount.wordcount.main(wordcount.java:62)',
 '\tat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)',
 '\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)',
 '\tat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)',
 '\tat java.lang.reflect.Method.invoke(Method.java:606)',
 '\tat org.apache.hadoop.util.RunJar.run(RunJar.java:221)',
 '\tat org.apache.hadoop.util.RunJar.main(RunJar.java:136)',
 '[cloudera@quickstart ~]$ hadoop jar /home/cloudera/Desktop/Untitled.jar wordcount.wordcount /user/cloudera/WordCount/input  /user/cloudera/WordCount/output',
 '16/08/24 04:16:34 INFO client.RMProxy: Connecting to ResourceManager at /0.0.0.0:8032',
 '16/08/24 04:16:34 WARN mapreduce.JobResourceUploader: Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this.',
 '16/08/24 04:16:35 INFO input.FileInputFormat: Total input paths to process : 1',
 '16/08/24 04:16:35 INFO mapreduce.JobSubmitter: number of splits:1',
 '16/08/24 04:16:35 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1471515428094_0002',
 '16/08/24 04:16:36 INFO impl.YarnClientImpl: Submitted application application_1471515428094_0002',
 '16/08/24 04:16:36 INFO mapreduce.Job: The url to track the job: http://quickstart.cloudera:8088/proxy/application_1471515428094_0002/',
 '16/08/24 04:16:36 INFO mapreduce.Job: Running job: job_1471515428094_0002',
 '16/08/24 04:16:47 INFO mapreduce.Job: Job job_1471515428094_0002 running in uber mode : false',
 '16/08/24 04:16:47 INFO mapreduce.Job:  map 0% reduce 0%',
 '16/08/24 04:16:54 INFO mapreduce.Job:  map 100% reduce 0%',
 '16/08/24 04:17:03 INFO mapreduce.Job:  map 100% reduce 100%',
 '16/08/24 04:17:03 INFO mapreduce.Job: Job job_1471515428094_0002 completed successfully',
 '16/08/24 04:17:03 INFO mapreduce.Job: Counters: 49',
 '\tFile System Counters',
 '\t\tFILE: Number of bytes read=330',
 '\t\tFILE: Number of bytes written=227863',
 '\t\tFILE: Number of read operations=0',
 '\t\tFILE: Number of large read operations=0',
 '\t\tFILE: Number of write operations=0',
 '\t\tHDFS: Number of bytes read=313',
 '\t\tHDFS: Number of bytes written=220',
 '\t\tHDFS: Number of read operations=6',
 '\t\tHDFS: Number of large read operations=0',
 '\t\tHDFS: Number of write operations=2',
 '\tJob Counters ',
 '\t\tLaunched map tasks=1',
 '\t\tLaunched reduce tasks=1',
 '\t\tData-local map tasks=1',
 '\t\tTotal time spent by all maps in occupied slots (ms)=4946',
 '\t\tTotal time spent by all reduces in occupied slots (ms)=6071',
 '\t\tTotal time spent by all map tasks (ms)=4946',
 '\t\tTotal time spent by all reduce tasks (ms)=6071',
 '\t\tTotal vcore-seconds taken by all map tasks=4946',
 '\t\tTotal vcore-seconds taken by all reduce tasks=6071',
 '\t\tTotal megabyte-seconds taken by all map tasks=5064704',
 '\t\tTotal megabyte-seconds taken by all reduce tasks=6216704',
 '\tMap-Reduce Framework',
 '\t\tMap input records=1',
 '\t\tMap output records=27',
 '\t\tMap output bytes=285',
 '\t\tMap output materialized bytes=330',
 '\t\tInput split bytes=136',
 '\t\tCombine input records=27',
 '\t\tCombine output records=26',
 '\t\tReduce input groups=26',
 '\t\tReduce shuffle bytes=330',
 '\t\tReduce input records=26',
 '\t\tReduce output records=26',
 '\t\tSpilled Records=52',
 '\t\tShuffled Maps =1',
 '\t\tFailed Shuffles=0',
 '\t\tMerged Map outputs=1',
 '\t\tGC time elapsed (ms)=125',
 '\t\tCPU time spent (ms)=1350',
 '\t\tPhysical memory (bytes) snapshot=332062720',
 '\t\tVirtual memory (bytes) snapshot=3007524864',
 '\t\tTotal committed heap usage (bytes)=226562048',
 '\tShuffle Errors',
 '\t\tBAD_ID=0',
 '\t\tCONNECTION=0',
 '\t\tIO_ERROR=0',
 '\t\tWRONG_LENGTH=0',
 '\t\tWRONG_MAP=0',
 '\t\tWRONG_REDUCE=0',
 '\tFile Input Format Counters ',
 '\t\tBytes Read=177',
 '\tFile Output Format Counters ',
 '\t\tBytes Written=220',
 '[cloudera@quickstart ~]$ hdfs dfs -cat /user/cloudera/WordCount/output/*',
 '(including\t1']

In [ ]: