words = remove_punctuation(words)
#print(words)
def replace_numbers(words):
    '''Replace all integer occurrences in the list of tokenized words with their textual form'''
    p = inflect.engine()
    new_words = []
    for word in words:
        if word.isdigit():
            new_word = p.number_to_words(word)
            new_words.append(new_word)
        else:
            new_words.append(word)
    return new_words
words = replace_numbers(words)
#print(words)
def remove_stopwords(words):
    '''Remove stop words from the list of tokenized words'''
    new_words = []
    for word in words:
        if word not in stopwords.words('english'):
            new_words.append(word)
    return new_words
words = remove_stopwords(words)
#print(words)
def stem_words(words):
    '''Stem each word in the list of tokenized words'''
    stemmer = LancasterStemmer()
    stems = []
    for word in words:
        stem = stemmer.stem(word)
        stems.append(stem)
    return stems
words = stem_words(words)
#print(words)
def lemmatize_words(words):
    '''Lemmatize verbs in the list of tokenized words'''
    lemmatizer = WordNetLemmatizer()
    lemmas = []
    for word in words:
        lemma = lemmatizer.lemmatize(word, pos='v')
        lemmas.append(lemma)
    return lemmas
words = lemmatize_words(words)
#print(words)
print(words)
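For reference, the functions above depend on imports and a tokenized word list created earlier in the notebook (remove_punctuation is the helper defined before this section). A minimal sketch of how the pipeline could be driven end to end; the imports shown and sample_text are illustrative assumptions, not part of the original code:
# Minimal usage sketch (assumed imports; requires nltk.download('punkt'),
# nltk.download('stopwords') and nltk.download('wordnet'))
import inflect
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import LancasterStemmer, WordNetLemmatizer

sample_text = "I watched 3 episodes of the series yesterday"
words = word_tokenize(sample_text.lower())
words = replace_numbers(words)    # '3' becomes 'three'
words = remove_stopwords(words)   # drops 'i', 'of', 'the', ...
words = stem_words(words)         # aggressive Lancaster stems
words = lemmatize_words(words)    # verb lemmas via WordNet
print(words)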
Text preprocessing (non-user-defined functions)
import nltk
import re
import string
import inflect
from nltk.corpus import stopwords
from nltk import word_tokenize
# Read the dataset text file (replace "dataset path.txt" with the actual path)
series = open("dataset path.txt").read()
series
series_lower = series.lower()
# Removal of numbers
result1 = re.sub(r'\d+', '', series_lower)
#result1
# Removal of punctuations
result2 = result1.translate(str.maketrans('','',string.punctuation))
#result2
# Removing white spaces
result3 = result2.strip()
#result3
# Removal of stopwords
# Tokenize the text
result3_tokens = word_tokenize(result3)
#result3_tokens
# Removing stopwords
sw = set(stopwords.words('english'))
result4 = []
for w in result3_tokens:
    if w not in sw:
        result4.append(w)
#result4
text_tokenize = result4
#text_tokenize
output = nltk.pos_tag(text_tokenize)
#output
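As a quick illustration of the final step, nltk.pos_tag returns (token, Penn Treebank tag) pairs. A small example on made-up tokens (the tokens and the printed tags are illustrative only):
# Illustrative only; requires nltk.download('averaged_perceptron_tagger')
sample_tokens = ['series', 'started', 'slowly', 'great', 'finale']
print(nltk.pos_tag(sample_tokens))
# e.g. [('series', 'NN'), ('started', 'VBD'), ('slowly', 'RB'), ('great', 'JJ'), ('finale', 'NN')]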
Sentiment Analysis
import pandas as pd
import re
import string
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
import nltk
from wordcloud import WordCloud
import matplotlib.pyplot as plt
file = open("dataset path.txt", encoding='utf-8').read()
# These steps are not required; do them only if asked:
# this code, clean data 2 and clean data 3
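The imports above cover cleaning, a word cloud and plotting, but the analysis code itself is not shown here. The following is only a minimal sketch of how the imported WordCloud and matplotlib could visualize the text read above, plus an optional polarity score using NLTK's VADER analyzer; VADER is an assumed choice, not necessarily the method used in this notebook:
# Sketch only: word cloud of the raw text in `file`
wc = WordCloud(width=800, height=400, background_color='white',
               stopwords=set(stopwords.words('english'))).generate(file)
plt.figure(figsize=(10, 5))
plt.imshow(wc, interpolation='bilinear')
plt.axis('off')
plt.show()

# Optional sentiment scores with NLTK's VADER (assumption; requires
# nltk.download('vader_lexicon'))
from nltk.sentiment import SentimentIntensityAnalyzer
sia = SentimentIntensityAnalyzer()
print(sia.polarity_scores(file))  # {'neg': ..., 'neu': ..., 'pos': ..., 'compound': ...}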