Added a way to save ngrams to the index
worker.py | 91
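The change routes each extracted ngram into the index as a Posting keyed by the document's url id. The sketch below is a rough illustration of that flow, assuming the shapes used in the diff: get_tf_idf returns {ngram : {0: tf-idf score}}, and Posting / save_index behave roughly as written here. The project's real Indexer is not part of this commit, so these bodies are assumptions, not its actual implementation.

# Illustrative only: Posting, save_index, and the ngrams shape mirror the diff,
# but these bodies are assumptions about how the (unshown) Indexer might work.
import shelve


class Posting:
    def __init__(self, url, tf_idf):
        self.url = url          # document identifier (url id in the ngram loop)
        self.tf_idf = tf_idf    # score for this term in this document


def save_index(word, posting, path="index.shelve"):
    # Append the posting to the word's posting list in a shelve-backed store.
    with shelve.open(path) as index:
        postings = index.get(word, [])
        postings.append((posting.url, posting.tf_idf))
        index[word] = postings


if __name__ == "__main__":
    # get_tf_idf is called on a single-element list, so each inner dict has one
    # entry at key 0, which is why the diff reads the score as tfidf[0].
    ngrams = {"machin learn": {0: 0.42}, "learn": {0: 0.17}}
    url_id = 7  # stand-in for indexer.get_url_id(data["url"])
    for ngram, tfidf in ngrams.items():
        save_index(ngram, Posting(url_id, tfidf[0]))

A shelve-backed posting list is only one plausible backing store; the diff itself shows just that save_index(word, posting) is the write path used for both ngrams and single stemmed words.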
@@ -5,6 +5,7 @@ import shelve
from bs4 import BeautifulSoup
from time import perf_counter
import time
import pickle

import re

@@ -30,80 +31,26 @@ class Worker(Thread):
    def run(self):
        print("Target: " + str(self.file))
        ticker = perf_counter()
        tic = perf_counter()

        file_load = open(self.file)
        data = json.load(file_load)
        soup = BeautifulSoup(data["content"], features="lxml")
        words = word_tokenize(soup.get_text())
        toc = perf_counter()
        if toc - tic > 1:
            print("Took " + str(toc - tic) + " seconds to tokenize text!")
        # Gets a cleaner version of the text than soup.get_text()
        clean_text = ' '.join(soup.stripped_strings)
        # Removes runs of whitespace, tabs, and other spacing artifacts
        # The regex matches a whitespace character followed by a non-space, non-word character
        clean_text = re.sub(r'\s[^ \w]', '', clean_text)
        # Tokenizes the text and joins it back into a single string; keeping it as one
        # string is essential for get_tf_idf to work as intended
        clean_text = " ".join([i for i in clean_text.split() if i != "" and re.fullmatch('[A-Za-z0-9]+', i)])
        # Stems the tokenized text
        clean_text = " ".join([self.indexer.stemmer.stem(i) for i in clean_text.split()])
        # Wrap clean_text in a list because get_tf_idf works properly with single-element lists
        x = [clean_text]
        # ngrams is a dict
        # structure looks like {ngram : {0: tf-idf score}}
        ngrams = self.indexer.get_tf_idf(x)

        tokenized_words = list()
        stemmed_words = list()
        # Save one posting per ngram, keyed by this document's url id
        for ngram, tfidf in ngrams.items():
            posting = Posting(self.indexer.get_url_id(data["url"]), tfidf[0])
            self.indexer.save_index(ngram, posting)

        # Collect stemmed words that appear in important HTML tags
        important = {'b': [], 'h1': [], 'h2': [], 'h3': [], 'title': []}
        for key_words in important.keys():
            for i in soup.findAll(key_words):
                for word in word_tokenize(i.text):
                    important[key_words].append(self.indexer.stemmer.stem(word))

        tic = perf_counter()
        for word in words:
            if word != "" and re.fullmatch('[A-Za-z0-9]+', word):
                # Keep only alphanumeric tokens
                tokenized_words.append(word)
        toc = perf_counter()
        if toc - tic > 1:
            print("Took " + str(toc - tic) + " seconds to filter tokens!")

        tic = perf_counter()
        for word in tokenized_words:
            stemmed_words.append(self.indexer.stemmer.stem(word))
            #stemming,
            #tf_idf
            #get_tf_idf(stemmed_words,word)
            #post = Posting()
        toc = perf_counter()
        if toc - tic > 1:
            print("Took " + str(toc - tic) + " seconds to stem text!")

        counts = Counter(stemmed_words)
        size = len(stemmed_words)
        for word in counts:
            #posting = Posting(data["url"],self.get_tf_idf(list(' '.join(stemmed_words)),word))
            tic = perf_counter()
            weight = 1.0
            index = 0
            """
            for group in important:
                for word_important in group:
                    if word_important.lower() == word.lower():
                        if index == 0:
                            weight = 1.2
                        elif index == 1:
                            weight = 1.8
                        elif index == 2:
                            weight = 1.5
                        elif index == 3:
                            weight = 1.3
                        elif index == 4:
                            weight = 2.0
                index = index + 1
            """

            posting = Posting(data["url"], counts[word]/size*weight)
            toc = perf_counter()
            if toc - tic > 1:
                print("Took " + str(toc - tic) + " seconds to compute tf-idf!")

            tic = perf_counter()
            self.indexer.save_index(word, posting)
            toc = perf_counter()
            if toc - tic > 1:
                print("Took " + str(toc - tic) + " seconds to save to index!")

        tocker = perf_counter()
        print("Finished " + data['url'] + "\n" + str(tocker-ticker))