changed tf-idf
This commit is contained in:
parent d9fdee7b87
commit a736e05d00

indexer.py (37 lines changed)

@@ -10,6 +10,7 @@
#Posting ---> Source of file, tf-idf score. #for now we will only use these two, as we get more complex posting will be change accordingly

#Data input
+import math
import json
import os
import shelve

@@ -43,6 +44,7 @@ class Indexer():
self.stemmer = PorterStemmer()
self.id = list()

+
# Creates a pickle file that is a list of urls where the index of the url is the id that the posting refers to.
p = os.path.dirname(os.path.abspath(__file__))
my_filename = os.path.join(p, "urlID.pkl")

@@ -164,7 +166,7 @@ class Indexer():
# words = [whole text] one element list
# return the score
try:
-    tfidf = TfidfVectorizer(ngram_range=(1,3)) # ngram_range is range of n-values for different n-grams to be extracted (1,3) gets unigrams, bigrams, trigrams
+    tfidf = TfidfVectorizer(ngram_range=(1,1)) # ngram_range is range of n-values for different n-grams to be extracted (1,3) gets unigrams, bigrams, trigrams
tfidf_matrix = tfidf.fit_transform(words) # fit trains the model, transform creates matrix
df = pd.DataFrame(tfidf_matrix.toarray(), columns = tfidf.get_feature_names_out()) # store value of matrix to associated word/n-gram
#return(df.iloc[0][''.join(word)]) #used for finding single word in dataset
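
The hunk above narrows TfidfVectorizer to ngram_range=(1,1), i.e. unigrams only, while still fitting it on a single-document list. For orientation, a minimal standalone sketch of that sklearn call follows; the input string and variable names are hypothetical, not from this repository. Note that when only one document is fitted, every term gets the same idf, so the resulting weights are essentially normalized term frequencies.

from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd

# Sketch: score unigrams of a single document string, mirroring the call above.
docs = ["the quick brown fox jumps over the lazy dog"]   # hypothetical input
vec = TfidfVectorizer(ngram_range=(1, 1))                # unigrams only
matrix = vec.fit_transform(docs)                         # 1 x vocabulary sparse matrix
df = pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names_out())
print(df.iloc[0].sort_values(ascending=False).head())    # highest-weighted terms first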

@@ -173,7 +175,27 @@ class Indexer():
#print(df) # debugging
except:
print("Error in tf_idf!")
-return
+return -1

+def tf(self, text, url):
+    # tf
+    tokens = {}
+    split = text.split(" ")
+    # loop using index to keep track of position
+    for i in range(len(split)):
+        if split[i] not in tokens:
+            tokens[split[i]] = Posting(self.get_url_id(url), 1, i)
+
+        else:
+            tokens[split[i]].rtf += 1
+            tokens[split[i]].tf = (1 + math.log(tokens[split[i]].rtf))
+            tokens[split[i]].positions.append(i)
+    return tokens
+
+def tfidf(self, current_save):
+    for token, postings in current_save.items():
+        for p in postings:
+            p.tfidf = p.tf * math.log(len(self.id)/len(postings))
+
def get_data(self):

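
The new tf and tfidf methods implement a log-scaled weighting: rtf counts a term's occurrences in one document, tf = 1 + ln(rtf), and idf = ln(N / df) with N = len(self.id) (number of documents) and df the length of the token's posting list. A minimal sketch of the same arithmetic, with illustrative helper names that are not part of the commit:

import math

# Sketch of the weighting used above (helper names are illustrative only).
def log_tf(raw_count):
    # 1 + ln(count), matching tokens[...].tf = 1 + math.log(rtf)
    return 1 + math.log(raw_count)

def idf(total_docs, docs_containing_term):
    # ln(N / df), matching p.tfidf = p.tf * math.log(len(self.id) / len(postings))
    return math.log(total_docs / docs_containing_term)

# Example: a term seen 3 times in one page, appearing in 10 of 55770 pages.
weight = log_tf(3) * idf(55770, 10)   # about 2.10 * 8.63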

@@ -190,10 +212,15 @@ class Indexer():
while True:
file_path = self.path + "" + directory + "/"+file
# Add url to id here so that there isn't any problems when worker is multi-threaded
+
+tic = perf_counter()
load = open(file_path)
data = json.load(load)
if data["url"] not in self.id:
self.id.append(data["url"])
+toc = perf_counter()
+print("Took " + str(toc - tic) + " seconds to save url to self.id")
+
if len(threads) < num_threads:
thread = Worker(self,file_path)
threads.append(thread)

@@ -209,6 +236,12 @@ class Indexer():
if(index >= num_threads):
index = 0
time.sleep(.1)
+# These last few function calls calculates idf and finalizes tf-idf weighting for each index
+self.tfidf(self.save_1)
+self.tfidf(self.save_2)
+self.tfidf(self.save_3)
+self.tfidf(self.save_4)
+self.tfidf(self.save_5)
pickle.dump(self.id, self.f)
# should I self.f.close() here?
#Found 55770 documents
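
Both indexer.py and worker.py construct Posting objects, but the Posting class itself is not part of this commit. Based only on the fields referenced here (url, rtf, tf, tfidf, positions) and the call Posting(self.get_url_id(url), 1, i) in tf(), a hypothetical shape consistent with that usage could be the sketch below; the real constructor may differ (worker.py calls it with just a url id and a score).

# Hypothetical sketch of the Posting fields this commit relies on; not the actual class.
class Posting:
    def __init__(self, url, rtf, position):
        self.url = url                # id of the source document (index into the url list)
        self.rtf = rtf                # raw term frequency in that document
        self.tf = 1                   # 1 + ln(1); updated by Indexer.tf() as the count grows
        self.tfidf = 0                # filled in later by Indexer.tfidf()
        self.positions = [position]   # token positions within the document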

search.py (89 lines changed)

@@ -6,6 +6,7 @@ from bs4 import BeautifulSoup
from time import perf_counter
import time
import threading
+import pickle


#Data process

@@ -29,6 +30,11 @@ class Search():
self.save_3 = shelve.open("save_3.shelve")
self.save_4 = shelve.open("save_4.shelve")
self.save_5 = shelve.open("save_5.shelve")
+self.stemmer = PorterStemmer()
+p = os.path.dirname(os.path.abspath(__file__))
+my_filename = os.path.join(p, "urlID.pkl")
+self.f = open(my_filename, "rb+")
+self.id = pickle.load(self.f)

def get_save_file(self, word):
word_lower = word.lower()

@@ -44,17 +50,84 @@
else:
return self.save_5

-def get_userinput():
-return
+# looks for the smallest list and largest list
+def find_extremes(self, q):
+    longest = float('-inf')
+    shortest = float('inf')
+    remaining = []
+    # Careful if there is a word that the indexer doesn't have
+    for word in q:
+        d = self.get_save_file(word)
+        if len(d[word]) > longest:
+            longest = len(d[word])
+            l = word
+        elif len(d[word]) < shortest:
+            shortest = len(d[word])
+            s = word
+    for word in q:
+        if word != l or word != s:
+            remaining.append(word)
+    return s, l, remaining
+
-def get_tf_idf(self, words):
-try:
-tfidf = TfidfVectorizer(ngram_range=(1,3))
+def merge(self, short, long, r):
+    m = []
+    i = 0
+    j = 0
+    s = self.get_save_file(short)
+    l = self.get_save_file(long)
+    while i < len(s[short]) or j < len(l[long]):
+        if i == len(d[short])-1:
+            if s[short][i].url == l[long][j].url:
+                m.append(s[short][i].url)
+                j += 1
+            elif s[short][i].url < l[long][j].url:
+                break
+            else:
+                j += 1
+        else:
+            if s[short][i].url == l[long][j].url:
+                m.append(d[short][i].url)
+                i += 1
+                j += 1
+            elif s[short][i].url < l[long][j].url:
+                break
+            else:
+                i += 1
+                j += 1
+
+    final = []
+    if len(m) > 0:
+        while len(r) > 0:
+            d = self.get_save_file(r[0])
+            for i in d[r[0]]:
+                if i.url > m[len(m) -1]:
+                    break
+                elif i.url in m:
+                    final.append(i.url)
+            if len(final) != len(m):
+                m = final
+                final = []
+                r.pop(0)
+            else:
+                final = []
+                r.pop(0)
+
+        return m
+    else:
+        return -1
+
+def search(self):
+    query = input("Enter query: ")
+    query = [self.stemmer.stem(i) for i in query.split()]
+    x = self.find_extremes(query)
+    match = self.merge(x[0], x[1], x[2])
+    if match == -1:
+        print("No valid matches")
+    else:
+        for i in match:
+            print(self.id[i])
+
-def search(query):
-x = [query]
-
-file = self.get_save_file()
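
In the hunk above, find_extremes picks the query words with the shortest and longest posting lists, and merge then walks those two lists with two cursors, collecting document ids present in both before filtering against the remaining words. A standalone sketch of that two-pointer intersection follows; the function name is hypothetical and it assumes each posting list is sorted by ascending document id, as merge does.

# Sketch only: intersect two posting lists sorted by ascending doc id.
# The postings stand in for the project's Posting objects, which carry a .url id.
def intersect(short_postings, long_postings):
    matches = []
    i, j = 0, 0
    while i < len(short_postings) and j < len(long_postings):
        a, b = short_postings[i].url, long_postings[j].url
        if a == b:
            matches.append(a)
            i += 1
            j += 1
        elif a < b:
            i += 1   # advance the cursor pointing at the smaller id
        else:
            j += 1
    return matches

Starting from the shortest list keeps the candidate set small before the longer remaining lists are scanned.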

worker.py (17 lines changed)

@@ -31,11 +31,12 @@ class Worker(Thread):

def run(self):
print("Target: " + str(self.file))
+ticker = perf_counter()
file_load = open(self.file)
data = json.load(file_load)
soup = BeautifulSoup(data["content"],features="lxml")
# Gets a cleaner version text comparative to soup.get_text()
+tic = perf_counter()
clean_text = ' '.join(soup.stripped_strings)
# Looks for large white space, tabbed space, and other forms of spacing and removes it
# Regex expression matches for space characters excluding a single space or words

@@ -46,11 +47,19 @@ class Worker(Thread):
clean_text = " ".join([self.indexer.stemmer.stem(i) for i in clean_text.split()])
# Put clean_text as an element in a list because get_tf_idf workers properly with single element lists
x = [clean_text]
+toc = perf_counter()
+print("Took " + str(toc - tic) + " seconds to create clean text")
# ngrams is a dict
# structure looks like {ngram : {0: tf-idf score}}
ngrams = self.indexer.get_tf_idf(x)
-for ngram, tfidf in ngrams.items():
-    posting = Posting(self.indexer.get_url_id(data["url"]), tfidf[0])
-    self.indexer.save_index(ngram,posting)
+if ngrams != -1:
+    tic = perf_counter()
+    for ngram, tfidf in ngrams.items():
+        posting = Posting(self.indexer.get_url_id(data["url"]), tfidf[0])
+        self.indexer.save_index(ngram,posting)
+    toc = perf_counter()
+    print("Took " + str(toc - tic) + " seconds to save ngram")
+
+tocker = perf_counter()
+print("Took " + str(tocker - ticker) + " seconds to work")
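
The timing added above follows the usual perf_counter pattern: take a tick before a stage, another after it, and print the difference; ticker/tocker wrap the whole run() while tic/toc time individual stages. A generic sketch of that pattern, where the work line is only a placeholder:

from time import perf_counter

# Sketch: time one stage of work and report elapsed wall-clock seconds.
tic = perf_counter()
result = sum(i * i for i in range(1_000_000))   # placeholder for the real stage
toc = perf_counter()
print("Took " + str(toc - tic) + " seconds for this stage")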