Changed the tf_idf model to the new one; try it on the current dataset
parent c8640001c7
commit c4b3512df7
@@ -116,6 +116,7 @@ class Indexer():
         print(word)
         print("You have somehow went beyond the magic")
         return self.save_5

    def get_save_lock(self,word):
        word_lower = word.lower()
        if re.match(r"^[a-d0-1].*",word_lower):
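For context, this method and `get_save_lock` shard words across five save files by first character, with `save_5` as the catch-all (hence the "beyond the magic" print). A standalone sketch of that bucketing idea follows; only the `^[a-d0-1]` bucket and the `save_5` fallback are visible in this diff, so the other character ranges below are invented for illustration:

    import re
    from threading import Lock

    # Shard table mirroring the save_1..save_5 layout implied by the diff.
    # Only the "^[a-d0-1]" bucket and the save_5 fallback are actually
    # visible; the other ranges are placeholders.
    _FALLBACK = ("save_5", Lock())
    _SHARDS = [
        (re.compile(r"^[a-d0-1]"), ("save_1", Lock())),
        (re.compile(r"^[e-k2-3]"), ("save_2", Lock())),  # assumed range
        (re.compile(r"^[l-q4-6]"), ("save_3", Lock())),  # assumed range
        (re.compile(r"^[r-z7-9]"), ("save_4", Lock())),  # assumed range
    ]

    def get_save_and_lock(word):
        """Route a word to its save file and the lock that guards it."""
        word_lower = word.lower()
        for pattern, shard in _SHARDS:
            if pattern.match(word_lower):
                return shard
        return _FALLBACK  # mirrors `return self.save_5`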
@@ -130,6 +131,7 @@ class Indexer():
         print(word)
         print("You have somehow went beyond the magic")
         return self.save_5_lock.acquire()

    # I have a test file (mytest.py) with pandas but couldn't figure out how to grab just a single cell.
    # so I came up with this, if anyone knows how to get a single cell and can explain it to
    # me I would love to know, as I think that method might be quicker, maybe, idk it like
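On the question in the comment above: pandas does have dedicated scalar accessors for grabbing a single cell. A quick illustration (the DataFrame contents here are made up):

    import pandas as pd

    df = pd.DataFrame({"word": ["apple", "banana"], "tf_idf": [0.12, 0.07]})

    cell = df.at[0, "tf_idf"]   # one cell by row label and column name -> 0.12
    cell = df.iat[0, 1]         # one cell by integer position -> 0.12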
@@ -178,7 +180,7 @@ class Indexer():
 
    def get_data(self):
 
-        num_threads = 8
+        num_threads = 1
        threads = list()
 
        for directory in os.listdir(self.path):
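The only functional change in the Indexer is dropping `num_threads` from 8 to 1, which looks like a move to single-threaded runs while the new TF-IDF path is tested. The surrounding lines (`threads = list()`, the `os.listdir(self.path)` loop) suggest the usual spawn-and-join pattern; a generic sketch under that assumption, where the `Worker(...)` constructor arguments are hypothetical:

    import os
    from threading import Thread

    def get_data(self):
        num_threads = 1  # was 8; one worker keeps runs deterministic while debugging
        threads = list()
        for directory in os.listdir(self.path):
            worker = Worker(self, directory)  # hypothetical constructor signature
            worker.start()
            threads.append(worker)
            if len(threads) == num_threads:   # cap in-flight workers, then drain
                for thread in threads:
                    thread.join()
                threads = list()
        for thread in threads:                # join any remaining workers
            thread.join()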
worker.py (61 changed lines)
@@ -52,49 +52,54 @@ class Worker(Thread):
         tic = perf_counter()
         for word in words:
             if word != "" and re.fullmatch('[A-Za-z0-9]+',word):
-                #So all the tokenized words are here,
                 tokenized_words.append(word)
         toc = perf_counter()
         if toc - tic > 1 :
             print("Took " + str(toc - tic) + "seconds to isalnum text !")
-        #YOUR CODE HERE
 
         tic = perf_counter()
         for word in tokenized_words:
             stemmed_words.append(self.indexer.stemmer.stem(word))
-            #stemming,
-        #tf_idf
-        #get_tf_idf(stemmed_words,word)
-        #post = Posting()
         toc = perf_counter()
         if toc - tic > 1 :
             print("Took " + str(toc - tic) + "seconds to stemmed text !")
 
-        counts = Counter(stemmed_words)
-        size = len(stemmed_words)
-        for word in counts:
-            #posting = Posting(data["url"],self.get_tf_idf(list(' '.join(stemmed_words)),word))
+        """
+        tfidf = TfidfVectorizer(ngram_range=(1,3)) # ngram_range is range of n-values for different n-grams to be extracted (1,3) gets unigrams, bigrams, trigrams
+        tfidf_matrix = tfidf.fit_transform(stemmed_words) # fit trains the model, transform creates matrix
+        #df = pd.DataFrame(tfidf_matrix.toarray(), columns = tfidf.get_feature_names_out()) # store value of matrix to associated word/n-gram
+        tfidf.get_feature_names_out()
+        #tf_idf_dict = df.to_dict() # transform dataframe to dict *could be expensive the larger the data gets, tested on ~1000 word doc and took 0.002 secs to run
+
+        print(tfidf_matrix)
+        """
+
+        tfIdfVectorizer = TfidfVectorizer(use_idf=True)
+        tfIdf = tfIdfVectorizer.fit_transform(stemmed_words)
+        df = pd.DataFrame(tfIdf[0].T.todense(), index=tfIdfVectorizer.get_feature_names_out(), columns=["TF-IDF"])
+        df = df.sort_values('TF-IDF', ascending=False)
+
+        print(df.head(25))
+
+        for word in tf_idf_dict.keys():
             tic = perf_counter()
+            print(tf_idf_dict)
             weight = 1.0
-            index = 0
-            """
-            for group in important:
-                for word_important in group:
-                    if word_important.lower() == word.lower():
-                        if index == 0:
-                            weight = 1.2
-                        elif index == 1:
-                            weight = 1.8
-                        elif index == 2:
-                            weight = 1.5
-                        elif index == 3:
-                            weight = 1.3
-                        elif index == 4:
-                            weight = 2.0
-                index = index + 1
-            """
+            for k,v in important.items():
+                if k == 'b' and word in v:
+                    weight = 1.2
+                elif k == 'h1' and word in v:
+                    weight = 1.75
+                elif k == 'h2' and word in v:
+                    weight = 1.5
+                elif k == 'h3' and word in v:
+                    weight = 1.2
+                elif k == 'title' and word in v:
+                    weight = 2
 
-            posting = Posting(data["url"],counts[word]/size*weight)
+            posting = Posting(data["url"],tf_idf_dict[word]*weight)
             toc = perf_counter()
             if toc - tic > 1 :
                 print("Took " + str(toc - tic) + "seconds to tf_idf text !")
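One caveat with the new code as committed: `tf_idf_dict` is never assigned, since the `df.to_dict()` line remains commented out inside the triple-quoted block, so `for word in tf_idf_dict.keys():` would raise a NameError. Note also that passing `stemmed_words` (a list of single words) to `fit_transform` treats each word as its own one-word document, and `tfIdf[0]` then scores only the first of them. A minimal self-contained sketch of what the new path appears to intend, with the missing assignment filled in; the sample words, the `important` contents, and the URL are stand-ins, and `Posting` is reduced to a score:

    import pandas as pd
    from sklearn.feature_extraction.text import TfidfVectorizer

    # Stand-ins for values that come from elsewhere in the repo.
    stemmed_words = ["inform", "retriev", "index", "inform", "search", "engin"]
    important = {"b": [], "h1": ["inform"], "h2": [], "h3": [], "title": ["search"]}
    url = "http://example.com/page"  # plays the role of data["url"]

    tfIdfVectorizer = TfidfVectorizer(use_idf=True)
    tfIdf = tfIdfVectorizer.fit_transform(stemmed_words)  # each word acts as its own "document"

    df = pd.DataFrame(tfIdf[0].T.todense(),
                      index=tfIdfVectorizer.get_feature_names_out(),
                      columns=["TF-IDF"])
    df = df.sort_values("TF-IDF", ascending=False)

    # The assignment the committed code is missing; without it the loop
    # below raises NameError on tf_idf_dict.
    tf_idf_dict = df["TF-IDF"].to_dict()

    for word in tf_idf_dict.keys():
        weight = 1.0
        for k, v in important.items():
            if k == 'b' and word in v:
                weight = 1.2
            elif k == 'h1' and word in v:
                weight = 1.75
            elif k == 'h2' and word in v:
                weight = 1.5
            elif k == 'h3' and word in v:
                weight = 1.2
            elif k == 'title' and word in v:
                weight = 2
        score = tf_idf_dict[word] * weight  # Posting(data["url"], ...) in the real code
        print(word, score)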
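For reference on the abandoned first attempt kept in the triple-quoted block: `ngram_range=(1,3)` makes the vectorizer emit unigrams, bigrams, and trigrams as features, which only matters when each input string contains multiple tokens. A tiny demo with made-up sentences:

    from sklearn.feature_extraction.text import TfidfVectorizer

    docs = ["the quick brown fox", "the lazy dog"]
    tfidf = TfidfVectorizer(ngram_range=(1, 3))  # extract unigrams, bigrams, trigrams
    matrix = tfidf.fit_transform(docs)           # fit learns the vocabulary, transform builds the matrix
    print(tfidf.get_feature_names_out())
    # ['brown' 'brown fox' 'dog' 'fox' 'lazy' 'lazy dog' 'quick' ...]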