7 Commits

Author SHA1 Message Date
Aaron
e7c4170cc2 Update indexer.py: had incorrect implementation 2022-05-12 17:58:31 -07:00
inocturnis
c4b3512df7 Changed tf_idf model into the new one, try it on the current dataset 2022-05-12 15:00:09 -07:00
iNocturnis
c8640001c7 Merge branch 'tf_idf' 2022-05-12 14:30:22 -07:00
Lacerum
f5610eaa62 tf-idf ngrams and now returns dict rather than score 2022-05-11 14:46:32 -07:00
inocturnis
f1fe3b26ac Merged with weighting but cannot implement due to tokens being messy and some comparison error 2022-05-06 20:45:52 -07:00
iNocturnis
5c703b6471 Merge remote-tracking branch 'origin/posting' 2022-05-06 20:26:03 -07:00
inocturnis
c892bbac03 Changed counter for tf to one doing O(n) instead of O(n^2), included multi-threading to speed up processing speed 2022-05-06 20:22:52 -07:00
3 changed files with 193 additions and 72 deletions
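Commit c892bbac03 describes swapping an O(n^2) term-frequency counter for an O(n) one. A minimal sketch of that idea in Python (illustrative function names, not the repo's actual code):

# Illustrative only: shows the complexity difference the commit message refers to.
from collections import Counter

def tf_quadratic(tokens):
    # O(n^2): tokens.count(word) rescans the whole token list for every token
    return {word: tokens.count(word) / len(tokens) for word in tokens}

def tf_linear(tokens):
    # O(n): a single pass builds all counts at once
    counts = Counter(tokens)
    total = len(tokens)
    return {word: count / total for word, count in counts.items()}

print(tf_linear(["data", "index", "data", "posting"]))
# {'data': 0.5, 'index': 0.25, 'posting': 0.25}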

indexer.py View File

@@ -15,7 +15,8 @@ import os
import shelve
from bs4 import BeautifulSoup
from time import perf_counter
+import time
+import threading
#Data process
@@ -29,6 +30,7 @@ import re
#Logging postings
from posting import Posting
+from worker import Worker
class Indexer():
@@ -61,16 +63,27 @@ class Indexer():
self.save_1 = shelve.open("save_1.shelve")
+self.save_1_lock = threading.Lock()
self.save_2 = shelve.open("save_2.shelve")
+self.save_2_lock = threading.Lock()
self.save_3 = shelve.open("save_3.shelve")
+self.save_3_lock = threading.Lock()
self.save_4 = shelve.open("save_4.shelve")
+self.save_4_lock = threading.Lock()
self.save_5 = shelve.open("save_5.shelve")
+self.save_5_lock = threading.Lock()
+print(len(list(self.save_1.keys())))
+print(len(list(self.save_2.keys())))
+print(len(list(self.save_3.keys())))
+print(len(list(self.save_4.keys())))
+print(len(list(self.save_5.keys())))
def save_index(self,word,posting):
cur_save = self.get_save_file(word)
+lock = self.get_save_lock(word)
+lock.acquire()
shelve_list = list()
try:
shelve_list = cur_save[word]
shelve_list.append(posting)
@@ -80,10 +93,12 @@ class Indexer():
if toc - tic > 1 :
print("Took " + str(toc - tic) + "seconds to sort shelve list !")
cur_save.sync()
+lock.release()
except:
shelve_list.append(posting)
cur_save[word] = shelve_list
cur_save.sync()
+lock.release()
def get_save_file(self,word):
#return the correct save depending on the starting letter of word
@@ -102,10 +117,27 @@ class Indexer():
print("You have somehow went beyond the magic")
return self.save_5
+def get_save_lock(self,word):
+word_lower = word.lower()
+if re.match(r"^[a-d0-1].*",word_lower):
+return self.save_1_lock
+elif re.match(r"^[e-k2-3].*",word_lower):
+return self.save_2_lock
+elif re.match(r"^[l-q4-7].*",word_lower):
+return self.save_3_lock
+elif re.match(r"^[r-z8-9].*",word_lower):
+return self.save_4_lock
+else:
+print(word)
+print("You have somehow went beyond the magic")
+return self.save_5_lock
# I have a test file (mytest.py) with pandas but couldn't figure out how to grab just a single cell.
# so I came up with this, if anyone knows how to get a single cell and can explain it to
# me I would love to know, as I think that method might be quicker, maybe, idk it like
# 4am
+# returns a dict of words/n-grams with their associated tf-idf score *can also return just a single score or a pandas dataframe
# https://stackoverflow.com/questions/34449127/sklearn-tfidf-transformer-how-to-get-tf-idf-values-of-given-words-in-documen
# Andy: added parameter imporant_words in order to do multiplication of score
@@ -115,6 +147,7 @@ class Indexer():
#word the word we finding the score for
#return the score
try:
+'''
tfidf = TfidfVectorizer()
tfidf_matrix = tfidf.fit_transform(words)
df = pd.DataFrame(tfidf_matrix.toarray(), columns = tfidf.get_feature_names_out())
@@ -134,83 +167,57 @@ class Indexer():
#print(df)
except KeyError:
return -1
+'''
+try:
+tfidf = TfidfVectorizer(ngram_range=(1,3)) # ngram_range is range of n-values for different n-grams to be extracted (1,3) gets unigrams, bigrams, trigrams
+tfidf_matrix = tfidf.fit_transform(words) # fit trains the model, transform creates matrix
+df = pd.DataFrame(tfidf_matrix.toarray(), columns = tfidf.get_feature_names_out()) # store value of matrix to associated word/n-gram
+#return(df.iloc[0][''.join(word)]) #used for finding single word in dataset
+tfidf_dict = df.to_dict() # transform dataframe to dict *could be expensive the larger the data gets, tested on ~1000 word doc and took 0.002 secs to run
+return tfidf_dict # returns the dict of words/n-grams with tf-idf as value
+#print(df) # debugging
+except:
+print("Error in tf_idf!")
+return
def get_data(self):
+num_threads = 1
+threads = list()
for directory in os.listdir(self.path):
for file in os.listdir(self.path + "/" + directory + "/"):
#Actual files here
#JSON["url"] = url of crawled page, ignore fragments
#JSON["content"] = actual HTML
#JSON["encoding"] = ENCODING
-ticker = perf_counter()
-tic = perf_counter()
-file_load = open(self.path + "/" + directory + "/"+file)
-data = json.load(file_load)
-soup = BeautifulSoup(data["content"],from_encoding=data["encoding"])
-words = word_tokenize(soup.get_text())
+index = 0
+while True:
+file_path = self.path + "/" + directory + "/" + file
+if len(threads) < num_threads:
+thread = Worker(self,file_path)
+threads.append(thread)
+thread.start()
+break
+else:
+if not threads[index].is_alive():
+threads[index] = Worker(self,file_path)
+threads[index].start()
+break
+else:
+index = index + 1
+if(index >= num_threads):
+index = 0
+time.sleep(.1)
+#Found 55770 documents
+#
#getting important tokens
-important = {'b' : [], 'h1' : [], 'h2' : [], 'h3' : [], 'title' : []}
-for type in important.keys():
-for i in soup.findAll(type):
-for word in word_tokenize(i.text):
-important[type].append(self.stemmer.stem(word))
-toc = perf_counter()
-if toc - tic > 1 :
-print("Took " + str(toc - tic) + "seconds to tokenize text !")
-tokenized_words = list()
-stemmed_words = list()
-tic = perf_counter()
-for word in words:
-if word != "" and re.fullmatch('[A-Za-z0-9]+',word):
-#So all the tokenized words are here,
-tokenized_words.append(word)
-toc = perf_counter()
-if toc - tic > 1 :
-print("Took " + str(toc - tic) + "seconds to isalnum text !")
-#YOUR CODE HERE
-tic = perf_counter()
-for word in tokenized_words:
-stemmed_words.append(self.stemmer.stem(word))
-#stemming,
-#tf_idf
-#get_tf_idf(stemmed_words,word)
-#post = Posting()
-toc = perf_counter()
-if toc - tic > 1 :
-print("Took " + str(toc - tic) + "seconds to stemmed text !")
-for word in stemmed_words:
-#posting = Posting(data["url"],self.get_tf_idf(list(' '.join(stemmed_words)),word))
-tic = perf_counter()
-#added argument important
-posting = Posting(data["url"],self.tf_idf_raw(stemmed_words,word, important))
-toc = perf_counter()
-if toc - tic > 1 :
-print("Took " + str(toc - tic) + "seconds to tf_idf text !")
-tic = perf_counter()
-self.save_index(word,posting)
-toc = perf_counter()
-if toc - tic > 1 :
-print("Took " + str(toc - tic) + "seconds to save text !")
-tocker = perf_counter()
-print("Finished " + data['url'] + " in \t " + str(tocker-ticker))
-def tf_idf_raw(self,words,word):
-tf_times = words.count(word)
-tf = tf_times/len(words)
-return tf
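The new get_data hands each file to a Worker thread and caps the number of live threads by polling is_alive(). The same intent can be expressed with the standard library's thread pool; ThreadPoolExecutor and the index_file helper below are substitutions for illustration, not what the repo uses:

# Sketch of the bounded-worker idea using concurrent.futures instead of hand-rolled polling.
from concurrent.futures import ThreadPoolExecutor
import os

def index_all(indexer, root, num_threads=1):
    # At most num_threads documents are being parsed/indexed at any moment,
    # mirroring the is_alive() loop in get_data.
    with ThreadPoolExecutor(max_workers=num_threads) as pool:
        for directory in os.listdir(root):
            for file in os.listdir(os.path.join(root, directory)):
                file_path = os.path.join(root, directory, file)
                # indexer.index_file is a hypothetical stand-in for what Worker.run() does
                pool.submit(indexer.index_file, file_path)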
@@ -225,4 +232,4 @@ def main():
indexer.get_data()
if __name__ == "__main__":
main()
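The indexer.py changes above guard each shelve file with its own threading.Lock so worker threads writing to different alphabetical shards do not block each other. A self-contained sketch of that pattern (hypothetical shard file names, not the repo's save_*.shelve files):

# Illustrative sketch: one shelve file plus one lock per first-letter shard.
import re
import shelve
import threading

class ShardedStore:
    def __init__(self):
        self.shards = [shelve.open(f"shard_{i}.shelve") for i in range(5)]
        self.locks = [threading.Lock() for _ in range(5)]

    def _shard_id(self, word):
        w = word.lower()
        if re.match(r"^[a-d0-1]", w):
            return 0
        if re.match(r"^[e-k2-3]", w):
            return 1
        if re.match(r"^[l-q4-7]", w):
            return 2
        if re.match(r"^[r-z8-9]", w):
            return 3
        return 4  # anything else (punctuation, other scripts) falls through

    def append_posting(self, word, posting):
        i = self._shard_id(word)
        with self.locks[i]:  # 'with' releases the lock even if the write raises
            postings = self.shards[i].get(word, [])
            postings.append(posting)
            self.shards[i][word] = postings
            self.shards[i].sync()

Using the lock as a context manager avoids the failure mode in save_index, where an exception between acquire() and release() would leave the shard lock held forever.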

View File

@@ -4,6 +4,7 @@ from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
import numpy as np
#tf_idf
#words = whole text
#word the word we finding the score for
@@ -19,13 +20,12 @@ words = ['this is the first document '
doc1 = ["I can't fucking take it any more. Among Us has singlehandedly ruined my life. The other day my teacher was teaching us Greek Mythology and he mentioned a pegasus and I immediately thought 'Pegasus? more like Mega Sus!!!!' and I've never wanted to kms more. I can't look at a vent without breaking down and fucking crying. I can't eat pasta without thinking 'IMPASTA??? THATS PRETTY SUS!!!!' Skit 4 by Kanye West. The lyrics ruined me. A Mongoose, or the 25th island of greece. The scientific name for pig. I can't fucking take it anymore. Please fucking end my suffering."]
doc2 = ["Anyways, um... I bought a whole bunch of shungite rocks, do you know what shungite is? Anybody know what shungite is? No, not Suge Knight, I think he's locked up in prison. I'm talkin' shungite. Anyways, it's a two billion year-old like, rock stone that protects against frequencies and unwanted frequencies that may be traveling in the air. That's my story, I bought a whole bunch of stuff. Put 'em around the la casa. Little pyramids, stuff like that."]
word = 'life'
try:
-tfidf = TfidfVectorizer()
-tfidf_matrix = tfidf.fit_transform(doc1)
+tfidf = TfidfVectorizer(ngram_range=(3,3)) # ngram_range is range of n-values for different n-grams to be extracted (1,3) gets unigrams, bigrams, trigrams
+tfidf_matrix = tfidf.fit_transform(words)
df = pd.DataFrame(tfidf_matrix.toarray(), columns = tfidf.get_feature_names_out())
-print(df.iloc[0][''.join(word)])
-#print(df)
+#print(df.iloc[0][''.join(word)])
+data = df.to_dict()
except KeyError: # word does not exist
print(-1)
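The change in this test file switches TfidfVectorizer to n-grams and keeps the whole score table as a dict instead of printing a single cell. A small standalone version of that flow, runnable on its own with made-up documents:

from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd

docs = [
    "the quick brown fox jumps over the lazy dog",
    "the quick brown cat sleeps all day",
]

# (1, 3) extracts unigrams, bigrams and trigrams, as in the tf_idf change above
tfidf = TfidfVectorizer(ngram_range=(1, 3))
matrix = tfidf.fit_transform(docs)  # fit learns vocabulary and IDF, transform builds the matrix

df = pd.DataFrame(matrix.toarray(), columns=tfidf.get_feature_names_out())
scores = df.to_dict()               # {ngram: {doc_index: tf-idf score}}
print(scores["quick brown"])        # {0: <score in doc 0>, 1: <score in doc 1>}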

worker.py Normal file (114 lines)
View File

@@ -0,0 +1,114 @@
from threading import Thread
import json
import os
import shelve
from bs4 import BeautifulSoup
from time import perf_counter
import time
import re
#Data process
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
import numpy as np
from collections import Counter
from posting import Posting
import sys
class Worker(Thread):
def __init__(self,indexer,target):
self.file = target
self.indexer = indexer
super().__init__(daemon=True)
def run(self):
print("Target: " + str(self.file))
ticker = perf_counter()
tic = perf_counter()
file_load = open(self.file)
data = json.load(file_load)
soup = BeautifulSoup(data["content"],features="lxml")
words = word_tokenize(soup.get_text())
toc = perf_counter()
if toc - tic > 1 :
print("Took " + str(toc - tic) + "seconds to tokenize text !")
tokenized_words = list()
stemmed_words = list()
important = {'b' : [], 'h1' : [], 'h2' : [], 'h3' : [], 'title' : []}
for key_words in important.keys():
for i in soup.findAll(key_words):
for word in word_tokenize(i.text):
important[key_words].append(self.indexer.stemmer.stem(word))
tic = perf_counter()
for word in words:
if word != "" and re.fullmatch('[A-Za-z0-9]+',word):
tokenized_words.append(word)
toc = perf_counter()
if toc - tic > 1 :
print("Took " + str(toc - tic) + "seconds to isalnum text !")
tic = perf_counter()
for word in tokenized_words:
stemmed_words.append(self.indexer.stemmer.stem(word))
toc = perf_counter()
if toc - tic > 1 :
print("Took " + str(toc - tic) + "seconds to stemmed text !")
"""
tfidf = TfidfVectorizer(ngram_range=(1,3)) # ngram_range is range of n-values for different n-grams to be extracted (1,3) gets unigrams, bigrams, trigrams
tfidf_matrix = tfidf.fit_transform(stemmed_words) # fit trains the model, transform creates matrix
#df = pd.DataFrame(tfidf_matrix.toarray(), columns = tfidf.get_feature_names_out()) # store value of matrix to associated word/n-gram
tfidf.sget_feature_names_out()
#tf_idf_dict = df.to_dict() # transform dataframe to dict *could be expensive the larger the data gets, tested on ~1000 word doc and took 0.002 secs to run
print(tfidf_matrix)
"""
tfIdfVectorizer=TfidfVectorizer(use_idf=True)
tfIdf = tfIdfVectorizer.fit_transform(stemmed_words)
df = pd.DataFrame(tfIdf[0].T.todense(), index=tfIdfVectorizer.get_feature_names_out(), columns=["TF-IDF"])
df = df.sort_values('TF-IDF', ascending=False)
print(df.head(25))
tf_idf_dict = df.to_dict()["TF-IDF"] # build the word -> TF-IDF score dict the loop below expects
for word in tf_idf_dict.keys():
tic = perf_counter()
print(tf_idf_dict)
weight = 1.0
for k,v in important.items():
if k == 'b' and word in v:
weight = 1.2
elif k == 'h1' and word in v:
weight = 1.75
elif k == 'h2' and word in v:
weight = 1.5
elif k == 'h3' and word in v:
weight = 1.2
elif k == 'title' and word in v:
weight = 2
posting = Posting(data["url"],tf_idf_dict[word]*weight)
toc = perf_counter()
if toc - tic > 1 :
print("Took " + str(toc - tic) + "seconds to tf_idf text !")
tic = perf_counter()
self.indexer.save_index(word,posting)
toc = perf_counter()
if toc - tic > 1 :
print("Took " + str(toc - tic) + "seconds to save text !")
tocker = perf_counter()
print("Finished " + data['url'] + "\n" + str(tocker-ticker))