def test_upper(self):
    # upper() should return a new TextBlob with the text uppercased
    blob = tb.TextBlob('lorem ipsum')
    assert_true(is_blob(blob.upper()))
    assert_equal(blob.upper(), tb.TextBlob('LOREM IPSUM'))
def test_sentiment_of_emoticons(self):
    # The '=)' emoticon should raise the polarity score
    b1 = tb.TextBlob("Faces have values =)")
    b2 = tb.TextBlob("Faces have values")
    assert_true(b1.sentiment[0] > b2.sentiment[0])
def test_sentences_with_space_before_punctuation(self):
    # A stray space before ':' should not break sentence segmentation
    text = "Uh oh. This sentence might cause some problems. : Now we're ok."
    b = tb.TextBlob(text)
    assert_equal(len(b.sentences), 3)
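# The three tests above assume a nose-style test module with imports and a
# helper roughly like the following (a sketch; is_blob is an assumption):
import textblob as tb
from nose.tools import assert_true, assert_equal

def is_blob(obj):
    # Hypothetical helper: verify that a result is still a TextBlob
    return isinstance(obj, tb.TextBlob)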
from textblob import TextBlob

def sentiment(text):
    # Return the sentiment polarity score in [-1.0, 1.0]
    blob = TextBlob(text)
    return blob.sentiment.polarity
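# A minimal usage sketch for sentiment(); the polarity values in the
# comments are indicative, not exact:
print(sentiment("I love this library"))       # positive, e.g. ~0.5
print(sentiment("This was a terrible idea"))  # negative, e.g. ~-1.0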
print("A value>0 is positive, close to 0 slightly positive")
print("A value<0 is negative, close to 0 slightly negative","\n")
myview=TextBlob("I hate movie 1. It was too violent ")
print(myview,":","\n",myview.sentiment,"\n")
dialog=TextBlob("I hate movie 1. It was too violent ")
myview=TextBlob("I like autumn. It reminds me of some sad music ")
print(myview,":","\n",myview.sentiment,"\n")
dialog=dialog+myview
myview=TextBlob("The love story was cool too. A bit mushy but cool ")
print(myview,":","\n",myview.sentiment,"\n")
dialog=dialog+myview
myview=TextBlob("I would like to get out of here and see other horizons ")
print(myview,":","\n",myview.sentiment,"\n")
dialog=dialog+myview
#Parse noun phrases
print("Parse noun phrases to find potential key words:")
print(dialog.noun_phrases)
m=input("Press ENTER if you agree to complete X's profiling dataset with some images")
print("The AI program will now enter social networks again and pick up KEY images")
print("that X commented with TAGS that fit the KEYWORDS found","\n")
print("CRL-MM Representation Learning Meta Model(see next section in book)")
print("The following image is a sample of the dataset of X. ")
take_images=input("Press ENTER to Continue")
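# A standalone sketch of the noun-phrase step above, assuming the default
# TextBlob extractor (requires the NLTK corpora it depends on to be
# downloaded); the printed phrases are indicative, not exact:
from textblob import TextBlob

sample = TextBlob("I hate movie 1. The love story was cool too.")
print(sample.noun_phrases)  # e.g. ['love story']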
def preprocess(self, textString):
    # Lowercase, singularize, and normalize digits and month names
    text = TextBlob(textString.lower())
    words = text.words.singularize()
    words = [self.preprocessDigits(word) for word in words]
    words = [self.preprocessMonths(word) for word in words]
    # Pad the word list so n-grams at the boundaries are generated
    newWordList = ['']
    newWordList.extend(words)
    newWordList.append('')
    # Keep trigrams and bigrams that contain at least one non-stopword
    grams = [gram for gram in nltk.trigrams(newWordList)
             if len(set(gram) - self.stopWords) > 0]
    grams.extend([gram for gram in nltk.bigrams(newWordList)
                  if len(set(gram) - self.stopWords) > 0])
    # Add the individual non-stopword tokens as unigrams
    grams.extend([word for word in words if word not in self.stopWords])
    return grams
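# A self-contained sketch of the n-gram idea in preprocess(), without the
# class context; the stop-word set here is an assumption:
import nltk
from textblob import TextBlob

stop_words = {'the', 'a', 'of', ''}
words = TextBlob("the cats of rome".lower()).words.singularize()
padded = [''] + list(words) + ['']
grams = [g for g in nltk.trigrams(padded) if set(g) - stop_words]
grams += [g for g in nltk.bigrams(padded) if set(g) - stop_words]
grams += [w for w in words if w not in stop_words]
print(grams)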
def translateMessage(self, text):
    # Expected format: 'translate to <lang> - <text> and send to <contact>'
    translateKeyword = 'translate to'
    translateSep = '-'
    textSeparator = 'and send to'
    text = text.lower()
    langStart = text.index(translateKeyword)
    langEnd = text.index(translateSep)
    textEnd = text.index(textSeparator)
    langDetect = text[langStart + len(translateKeyword):langEnd].strip()
    textDetect = text[langEnd + len(translateSep):textEnd].strip()
    contactDetect = text[textEnd + len(textSeparator):].strip()
    # Note: translate() was removed from recent textblob releases; an older
    # version that still ships it is assumed here
    blob = TextBlob(textDetect)
    translatedText = str(blob.translate(to=self._ISOlanguage[langDetect]))
    self.send_to_whatsapp_id(contactDetect, translatedText)
    self._translateContacts.append(contactDetect)
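# A sketch of the command parsing above on its own, outside the class:
text = "translate to spanish - hello there and send to alice".lower()
lang = text[text.index('translate to') + len('translate to'):text.index('-')].strip()
msg = text[text.index('-') + 1:text.index('and send to')].strip()
contact = text[text.index('and send to') + len('and send to'):].strip()
print(lang, '|', msg, '|', contact)  # spanish | hello there | alice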
def read_job_description(file_path):
    """Reads a text file with the job title on the first line and the description following.

    Args:
        file_path (str): path to a text file (.txt) containing the job title and description

    Returns:
        dict: job title, job description as a TextBlob, and the extracted
        keywords, value sentences, actions, and acronyms
    """
    job_data = {}
    # Read as UTF-8, ignoring undecodable bytes
    with open(file_path, encoding='utf-8', errors='ignore') as file:
        job_data['title'] = file.readline().rstrip('\n')
        job_data['description'] = TextBlob(file.read())
        job_data['keywords'] = extract_keywords(job_data['description'])
        job_data['value_sentences'] = extract_value_sentences(job_data['description'])
        job_data['actions'] = extract_actions(job_data['description'])
        job_data['acronyms'] = extract_acronyms(job_data['description'])
    return job_data
def read_resume(file_path):
    # Read the resume as UTF-8 text, ignoring undecodable bytes
    with open(file_path, encoding='utf-8', errors='ignore') as file:
        content = file.read()
    return TextBlob(content)
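# A usage sketch for the two readers above; the file names are hypothetical,
# and the extract_* helpers are assumed to come from the surrounding module:
job = read_job_description('job_posting.txt')
resume = read_resume('resume.txt')
print(job['title'])
print(job['keywords'])       # assuming extract_keywords returns a list
print(resume.sentences[:2])  # first two sentences of the resume blob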