In [1]:
# Tokenize: split text into tokens
# word_tokenize: splits a sentence into individual words
# sent_tokenize: splits text into separate sentences
# word_tokenize treats punctuation such as periods as separate tokens.
from nltk.tokenize import word_tokenize, sent_tokenize
text = "Hello Mr. Daniel, how are you today. Hope you are doing good!!"
print(word_tokenize(text))
print('\n')
for word in word_tokenize(text):
    print(word)
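In [ ]:
# The comments above also describe sent_tokenize, which the cell above does not
# demonstrate. A minimal sketch, reusing the same `text` variable and the import
# from the previous cell: sent_tokenize splits the text into sentences rather
# than words.
print(sent_tokenize(text))

for sentence in sent_tokenize(text):
    print(sentence)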
In [ ]: