From a736e05d009eb296a23ba053e9319bf51bb706b3 Mon Sep 17 00:00:00 2001
From: unknown
Date: Wed, 25 May 2022 18:39:02 -0700
Subject: [PATCH] changed tf-idf

---
 indexer.py | 37 ++++++++++++++++++++--
 search.py  | 93 ++++++++++++++++++++++++++++++++++++++++++++++++------
 worker.py  | 19 ++++++++---
 3 files changed, 132 insertions(+), 17 deletions(-)

diff --git a/indexer.py b/indexer.py
index 9ce39d1..5bc734c 100644
--- a/indexer.py
+++ b/indexer.py
@@ -10,6 +10,7 @@
 #Posting ---> Source of file, tf-idf score.
 #for now we will only use these two, as we get more complex posting will be change accordingly
 #Data input
+import math
 import json
 import os
 import shelve
@@ -43,6 +44,7 @@ class Indexer():
         self.stemmer = PorterStemmer()
 
         self.id = list()
+        # Creates a pickle file that is a list of urls where the index of the url is the id that the posting refers to.
         p = os.path.dirname(os.path.abspath(__file__))
         my_filename = os.path.join(p, "urlID.pkl")
@@ -164,7 +166,7 @@ class Indexer():
         # words = [whole text] one element list
         # return the score
         try:
-            tfidf = TfidfVectorizer(ngram_range=(1,3)) # ngram_range is range of n-values for different n-grams to be extracted (1,3) gets unigrams, bigrams, trigrams
+            tfidf = TfidfVectorizer(ngram_range=(1,1)) # ngram_range is the range of n-values for the n-grams to extract; (1,1) keeps unigrams only
             tfidf_matrix = tfidf.fit_transform(words) # fit trains the model, transform creates matrix
             df = pd.DataFrame(tfidf_matrix.toarray(), columns = tfidf.get_feature_names_out()) # store value of matrix to associated word/n-gram
             #return(df.iloc[0][''.join(word)]) #used for finding single word in dataset
@@ -173,7 +175,27 @@ class Indexer():
             #print(df) # debugging
         except:
             print("Error in tf_idf!")
-            return
+            return -1
+
+    def tf(self, text, url):
+        # term frequency per token, with positions
+        tokens = {}
+        split = text.split(" ")
+        # loop using index to keep track of position
+        for i in range(len(split)):
+            if split[i] not in tokens:
+                tokens[split[i]] = Posting(self.get_url_id(url), 1, i)
+            else:
+                tokens[split[i]].rtf += 1
+                tokens[split[i]].tf = (1 + math.log(tokens[split[i]].rtf))
+                tokens[split[i]].positions.append(i)
+        return tokens
+
+    def tfidf(self, current_save):
+        for token, postings in current_save.items():
+            for p in postings:
+                p.tfidf = p.tf * math.log(len(self.id)/len(postings))
 
 
     def get_data(self):
@@ -190,10 +212,15 @@
                 while True:
                     file_path = self.path + "" + directory + "/"+file
                     # Add url to id here so that there isn't any problems when worker is multi-threaded
+
+                    tic = perf_counter()
                     load = open(file_path)
                     data = json.load(load)
                     if data["url"] not in self.id:
                         self.id.append(data["url"])
+                    toc = perf_counter()
+                    print("Took " + str(toc - tic) + " seconds to save url to self.id")
+
                     if len(threads) < num_threads:
                         thread = Worker(self,file_path)
                         threads.append(thread)
@@ -209,6 +236,12 @@
                     if(index >= num_threads):
                         index = 0
                 time.sleep(.1)
+        # These last few calls calculate idf and finalize the tf-idf weighting for each index
+        self.tfidf(self.save_1)
+        self.tfidf(self.save_2)
+        self.tfidf(self.save_3)
+        self.tfidf(self.save_4)
+        self.tfidf(self.save_5)
         pickle.dump(self.id, self.f) # should I self.f.close() here?
 
 #Found 55770 documents
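
Not part of the patch itself, but for context: a minimal, self-contained sketch of the weighting the new tf()/tfidf() methods appear to implement — a log-scaled term frequency (1 + log rtf) per document, followed by an idf pass of log(N / document frequency) once every document is indexed. The simplified Posting dataclass, build_index(), and the toy corpus below are illustrative assumptions, not the repo's actual classes or data.

import math
from dataclasses import dataclass, field

@dataclass
class Posting:                       # simplified stand-in for the repo's Posting class
    doc_id: int
    rtf: int = 1                     # raw term frequency
    tf: float = 1.0                  # log-scaled term frequency: 1 + log(rtf)
    positions: list = field(default_factory=list)
    tfidf: float = 0.0

def build_index(docs):
    index = {}                       # token -> list of Postings, one per document containing it
    for doc_id, text in enumerate(docs):
        seen = {}
        for pos, token in enumerate(text.split()):
            if token not in seen:
                seen[token] = Posting(doc_id, positions=[pos])
            else:
                p = seen[token]
                p.rtf += 1
                p.tf = 1 + math.log(p.rtf)
                p.positions.append(pos)
        for token, posting in seen.items():
            index.setdefault(token, []).append(posting)
    # idf pass after all documents are added, as get_data() now does via the self.tfidf(...) calls
    n_docs = len(docs)
    for token, postings in index.items():
        for p in postings:
            p.tfidf = p.tf * math.log(n_docs / len(postings))
    return index

index = build_index(["the cat sat", "the cat sat on the cat", "dogs bark"])
print(index["cat"])                  # the document with two occurrences of "cat" gets the higher weight
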
diff --git a/search.py b/search.py
index d3e9d28..9c83b28 100644
--- a/search.py
+++ b/search.py
@@ -6,6 +6,7 @@
 from bs4 import BeautifulSoup
 from time import perf_counter
 import time
 import threading
+import pickle
 
 
 #Data process
@@ -29,6 +30,11 @@ class Search():
         self.save_3 = shelve.open("save_3.shelve")
         self.save_4 = shelve.open("save_4.shelve")
         self.save_5 = shelve.open("save_5.shelve")
+        self.stemmer = PorterStemmer()
+        p = os.path.dirname(os.path.abspath(__file__))
+        my_filename = os.path.join(p, "urlID.pkl")
+        self.f = open(my_filename, "rb+")
+        self.id = pickle.load(self.f)
 
     def get_save_file(self, word):
         word_lower = word.lower()
@@ -44,17 +50,84 @@
         else:
             return self.save_5
 
-    def get_userinput():
-        return
-
-    def get_tf_idf(self, words):
-        try:
-            tfidf = TfidfVectorizer(ngram_range=(1,3))
-
-    def search(query):
-        x = [query]
+    # looks for the smallest list and largest list
+    def find_extremes(self, q):
+        longest = float('-inf')
+        shortest = float('inf')
+        remaining = []
+        # Careful if there is a word that the indexer doesn't have
+        for word in q:
+            d = self.get_save_file(word)
+            if len(d[word]) > longest:
+                longest = len(d[word])
+                l = word
+            if len(d[word]) < shortest:
+                shortest = len(d[word])
+                s = word
+        for word in q:
+            if word != l and word != s:
+                remaining.append(word)
+        return s, l, remaining
+
+    def merge(self, short, long, r):
+        m = []
+        i = 0
+        j = 0
+        s = self.get_save_file(short)
+        l = self.get_save_file(long)
+        while i < len(s[short]) and j < len(l[long]):
+            if i == len(s[short])-1:
+                if s[short][i].url == l[long][j].url:
+                    m.append(s[short][i].url)
+                    j += 1
+                elif s[short][i].url < l[long][j].url:
+                    break
+                else:
+                    j += 1
+            else:
+                if s[short][i].url == l[long][j].url:
+                    m.append(s[short][i].url)
+                    i += 1
+                    j += 1
+                elif s[short][i].url < l[long][j].url:
+                    i += 1
+                else:
+                    j += 1
 
-        file = self.get_save_file()
+        final = []
+        if len(m) > 0:
+            while len(r) > 0:
+                d = self.get_save_file(r[0])
+                for i in d[r[0]]:
+                    if i.url > m[len(m) - 1]:
+                        break
+                    elif i.url in m:
+                        final.append(i.url)
+                if len(final) != len(m):
+                    m = final
+                final = []
+                r.pop(0)
+
+            return m
+        else:
+            return -1
+
+    def search(self):
+        query = input("Enter query: ")
+        query = [self.stemmer.stem(i) for i in query.split()]
+        x = self.find_extremes(query)
+        match = self.merge(x[0], x[1], x[2])
+        if match == -1:
+            print("No valid matches")
+        else:
+            for i in match:
+                print(self.id[i])
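
Again outside the patch, a sketch of the AND-style retrieval that find_extremes() and merge() are aiming for: intersect sorted postings lists, starting from the shortest (rarest) term so the working set stays small. The postings_by_term dict and the function names below are hypothetical stand-ins for the shelve files and Posting objects used in search.py.

def intersect_two(a, b):
    # two-pointer walk over two ascending lists of doc ids
    out, i, j = [], 0, 0
    while i < len(a) and j < len(b):
        if a[i] == b[j]:
            out.append(a[i])
            i += 1
            j += 1
        elif a[i] < b[j]:
            i += 1
        else:
            j += 1
    return out

def and_query(postings_by_term, terms):
    # intersect all terms' postings, starting from the rarest (shortest) list
    lists = sorted((postings_by_term.get(t, []) for t in terms), key=len)
    if not lists or not lists[0]:
        return []
    result = lists[0]
    for other in lists[1:]:
        result = intersect_two(result, other)
        if not result:
            break
    return result

postings = {"cat": [0, 2, 5, 9], "sat": [2, 3, 5], "mat": [2, 5, 7]}
print(and_query(postings, ["cat", "sat", "mat"]))    # [2, 5]
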
diff --git a/worker.py b/worker.py
index e861935..c73d5c1 100644
--- a/worker.py
+++ b/worker.py
@@ -31,11 +31,12 @@ class Worker(Thread):
     def run(self):
         print("Target: " + str(self.file))
-
+        ticker = perf_counter()
         file_load = open(self.file)
         data = json.load(file_load)
         soup = BeautifulSoup(data["content"],features="lxml")
         # Gets a cleaner version of the text compared to soup.get_text()
+        tic = perf_counter()
         clean_text = ' '.join(soup.stripped_strings)
         # Looks for large white space, tabbed space, and other forms of spacing and removes it
         # Regex expression matches for space characters excluding a single space or words
@@ -46,11 +47,19 @@ class Worker(Thread):
         clean_text = " ".join([self.indexer.stemmer.stem(i) for i in clean_text.split()])
         # Put clean_text as an element in a list because get_tf_idf works properly with single element lists
         x = [clean_text]
+        toc = perf_counter()
+        print("Took " + str(toc - tic) + " seconds to create clean text")
         # ngrams is a dict
         # structure looks like {ngram : {0: tf-idf score}}
         ngrams = self.indexer.get_tf_idf(x)
-
-        for ngram, tfidf in ngrams.items():
-            posting = Posting(self.indexer.get_url_id(data["url"]), tfidf[0])
-            self.indexer.save_index(ngram,posting)
+        if ngrams != -1:
+            tic = perf_counter()
+            for ngram, tfidf in ngrams.items():
+                posting = Posting(self.indexer.get_url_id(data["url"]), tfidf[0])
+                self.indexer.save_index(ngram,posting)
+            toc = perf_counter()
+            print("Took " + str(toc - tic) + " seconds to save ngram")
+
+        tocker = perf_counter()
+        print("Took " + str(tocker - ticker) + " seconds to work")
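
The tic/toc instrumentation added throughout Worker.run() could be factored into a small helper; the context manager below is one possible way to do that, assuming the plain perf_counter prints in the patch are all that is needed (the timed() helper is not part of the repo).

from time import perf_counter
from contextlib import contextmanager

@contextmanager
def timed(label):
    # brackets a block with the same perf_counter prints the patch adds by hand
    tic = perf_counter()
    try:
        yield
    finally:
        toc = perf_counter()
        print("Took " + str(toc - tic) + " seconds to " + label)

# usage mirroring the stages timed in Worker.run()
with timed("create clean text"):
    clean_text = " ".join("some    raw   page   text".split())

with timed("save ngram"):
    pass  # stand-in for the save_index() loop
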