Compare commits
4 Commits
Author | SHA1 | Date
---|---|---
Lacerum-pa | e7c4170cc2 |
Lacerum-pa | c4b3512df7 |
Lacerum-pa | c8640001c7 |
Lacerum-pa | f5610eaa62 |
indexer.py (19 changed lines)
```diff
@@ -116,6 +116,7 @@ class Indexer():
            print(word)
            print("You have somehow gone beyond the magic")
            return self.save_5

    def get_save_lock(self, word):
        word_lower = word.lower()
        if re.match(r"^[a-d0-1].*", word_lower):
@@ -130,10 +131,13 @@ class Indexer():
            print(word)
            print("You have somehow gone beyond the magic")
            return self.save_5_lock.acquire()

    # I have a test file (mytest.py) with pandas but couldn't figure out how to grab just a single cell,
    # so I came up with this. If anyone knows how to get a single cell and can explain it to
    # me, I would love to know, as I think that method might be quicker. Maybe, idk, it's like
    # 4am.

    # returns a dict of words/n-grams with their associated tf-idf score *can also return just a single score or a pandas dataframe
    # https://stackoverflow.com/questions/34449127/sklearn-tfidf-transformer-how-to-get-tf-idf-values-of-given-words-in-documen

    # Andy: added parameter imporant_words in order to do multiplication of score
@@ -143,6 +147,7 @@ class Indexer():
        # word: the word we are finding the score for
        # return: the score
        try:
            '''
            tfidf = TfidfVectorizer()
            tfidf_matrix = tfidf.fit_transform(words)
            df = pd.DataFrame(tfidf_matrix.toarray(), columns=tfidf.get_feature_names_out())
@@ -162,11 +167,23 @@ class Indexer():
            #print(df)
            except KeyError:
                return -1
            '''
            try:
                tfidf = TfidfVectorizer(ngram_range=(1,3)) # ngram_range is the range of n-values for the n-grams to extract; (1,3) gets unigrams, bigrams, and trigrams
                tfidf_matrix = tfidf.fit_transform(words) # fit trains the model, transform creates the matrix
                df = pd.DataFrame(tfidf_matrix.toarray(), columns=tfidf.get_feature_names_out()) # store each matrix value with its associated word/n-gram
                #return(df.iloc[0][''.join(word)]) # used for finding a single word in the dataset
                tfidf_dict = df.to_dict() # transform dataframe to dict *could get expensive as the data grows; tested on a ~1000-word doc and it took 0.002 secs to run
                return tfidf_dict # returns the dict of words/n-grams with tf-idf scores as values
                #print(df) # debugging
            except:
                print("Error in tf_idf!")
                return


    def get_data(self):

        num_threads = 8
        num_threads = 1
        threads = list()

        for directory in os.listdir(self.path):
```
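On the single-cell question in the comment above: a minimal sketch of one way to do it, assuming the same `TfidfVectorizer`/`pandas` pipeline as `tf_idf` (the `docs` sample below is hypothetical). `pandas` provides `df.at[row, column]` for direct scalar lookups, so fetching one score doesn't require building the whole dict first:

```python
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd

docs = ["the quick brown fox jumps over the lazy dog"]  # hypothetical sample input

tfidf = TfidfVectorizer(ngram_range=(1, 3))   # same settings as tf_idf above
tfidf_matrix = tfidf.fit_transform(docs)      # rows = documents, columns = n-grams
df = pd.DataFrame(tfidf_matrix.toarray(), columns=tfidf.get_feature_names_out())

# Single cell: df.at[row_index, column_label] does a direct scalar lookup,
# so there is no need to materialize the full dict when only one score is wanted.
print(df.at[0, "quick"])

# Full mapping, as tf_idf does: df.to_dict() yields one dict per n-gram,
# each keyed by row index.
tfidf_dict = df.to_dict()
print(tfidf_dict["quick"][0])  # same score via the dict
```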
mytest.py (10 changed lines)
```diff
@@ -4,6 +4,7 @@ from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
import numpy as np


#tf_idf
#words = whole text
#word: the word we are finding the score for
@@ -19,13 +20,12 @@ words = ['this is the first document '
doc1 = ["I can't fucking take it any more. Among Us has singlehandedly ruined my life. The other day my teacher was teaching us Greek Mythology and he mentioned a pegasus and I immediately thought 'Pegasus? more like Mega Sus!!!!' and I've never wanted to kms more. I can't look at a vent without breaking down and fucking crying. I can't eat pasta without thinking 'IMPASTA??? THATS PRETTY SUS!!!!' Skit 4 by Kanye West. The lyrics ruined me. A Mongoose, or the 25th island of greece. The scientific name for pig. I can't fucking take it anymore. Please fucking end my suffering."]
doc2 = ["Anyways, um... I bought a whole bunch of shungite rocks, do you know what shungite is? Anybody know what shungite is? No, not Suge Knight, I think he's locked up in prison. I'm talkin' shungite. Anyways, it's a two billion year-old like, rock stone that protects against frequencies and unwanted frequencies that may be traveling in the air. That's my story, I bought a whole bunch of stuff. Put 'em around the la casa. Little pyramids, stuff like that."]
word = 'life'

try:
    tfidf = TfidfVectorizer()
    tfidf_matrix = tfidf.fit_transform(doc1)
    tfidf = TfidfVectorizer(ngram_range=(3,3)) # ngram_range is the range of n-values for the n-grams to extract; (3,3) gets only trigrams
    tfidf_matrix = tfidf.fit_transform(words)
    df = pd.DataFrame(tfidf_matrix.toarray(), columns=tfidf.get_feature_names_out())
    print(df.iloc[0][''.join(word)])
    #print(df)
    #print(df.iloc[0][''.join(word)])
    data = df.to_dict()
except KeyError: # word does not exist
    print(-1)
```
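For the `KeyError` path above (the word is not in the fitted vocabulary), membership can also be checked up front: the fitted vectorizer exposes `vocabulary_`, a dict mapping each term to its column index. A small sketch under the same setup as mytest.py, with a hypothetical stand-in corpus:

```python
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd

doc1 = ["among us has singlehandedly ruined my life"]  # stand-in corpus
word = "life"

tfidf = TfidfVectorizer()
tfidf_matrix = tfidf.fit_transform(doc1)
df = pd.DataFrame(tfidf_matrix.toarray(), columns=tfidf.get_feature_names_out())

# vocabulary_ maps each fitted term to its column position, so membership
# can be tested before indexing instead of catching the KeyError afterwards.
if word in tfidf.vocabulary_:
    print(df.iloc[0][word])
else:
    print(-1)  # word does not exist, mirroring mytest.py
```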
worker.py (61 changed lines)
```diff
@@ -52,49 +52,54 @@ class Worker(Thread):
        tic = perf_counter()
        for word in words:
            if word != "" and re.fullmatch('[A-Za-z0-9]+', word):
                #So all the tokenized words are here,
                tokenized_words.append(word)
        toc = perf_counter()
        if toc - tic > 1:
            print("Took " + str(toc - tic) + " seconds to isalnum text!")
        #YOUR CODE HERE

        tic = perf_counter()
        for word in tokenized_words:
            stemmed_words.append(self.indexer.stemmer.stem(word))
            #stemming,
            #tf_idf
            #get_tf_idf(stemmed_words,word)
            #post = Posting()

        toc = perf_counter()
        if toc - tic > 1:
            print("Took " + str(toc - tic) + " seconds to stem text!")

        counts = Counter(stemmed_words)
        size = len(stemmed_words)
        for word in counts:
            #posting = Posting(data["url"],self.get_tf_idf(list(' '.join(stemmed_words)),word))
            tic = perf_counter()
            weight = 1.0
            index = 0
            """
            for group in important:
                for word_important in group:
                    if word_important.lower() == word.lower():
                        if index == 0:
                            weight = 1.2
                        elif index == 1:
                            weight = 1.8
                        elif index == 2:
                            weight = 1.5
                        elif index == 3:
                            weight = 1.3
                        elif index == 4:
                            weight = 2.0
                index = index + 1
            tfidf = TfidfVectorizer(ngram_range=(1,3)) # ngram_range is the range of n-values for the n-grams to extract; (1,3) gets unigrams, bigrams, and trigrams
            tfidf_matrix = tfidf.fit_transform(stemmed_words) # fit trains the model, transform creates the matrix
            #df = pd.DataFrame(tfidf_matrix.toarray(), columns=tfidf.get_feature_names_out()) # store each matrix value with its associated word/n-gram
            tfidf.get_feature_names_out()
            #tf_idf_dict = df.to_dict() # transform dataframe to dict *could get expensive as the data grows; tested on a ~1000-word doc and it took 0.002 secs to run

            print(tfidf_matrix)
            """

            posting = Posting(data["url"], counts[word]/size*weight)
        tfIdfVectorizer = TfidfVectorizer(use_idf=True)
        tfIdf = tfIdfVectorizer.fit_transform(stemmed_words)
        df = pd.DataFrame(tfIdf[0].T.todense(), index=tfIdfVectorizer.get_feature_names_out(), columns=["TF-IDF"])
        df = df.sort_values('TF-IDF', ascending=False)

        print(df.head(25))

        for word in tf_idf_dict.keys():
            tic = perf_counter()
            print(tf_idf_dict)
            weight = 1.0
            for k, v in important.items():
                if k == 'b' and word in v:
                    weight = 1.2
                elif k == 'h1' and word in v:
                    weight = 1.75
                elif k == 'h2' and word in v:
                    weight = 1.5
                elif k == 'h3' and word in v:
                    weight = 1.2
                elif k == 'title' and word in v:
                    weight = 2

            posting = Posting(data["url"], tf_idf_dict[word]*weight)

            toc = perf_counter()
            if toc - tic > 1:
                print("Took " + str(toc - tic) + " seconds to tf_idf text!")
```
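The elif chain above is the piece that boosts a term's score by the HTML tag it appeared in. A minimal table-driven sketch of that logic (the tag weights are taken from the diff; `get_tag_weight` and the example `important` dict are hypothetical):

```python
# Weights from the diff: title counts most, then h1, h2, then b/h3.
TAG_WEIGHTS = {'title': 2.0, 'h1': 1.75, 'h2': 1.5, 'h3': 1.2, 'b': 1.2}

def get_tag_weight(word, important):
    # important maps an HTML tag to the words that appeared inside it,
    # e.g. {'title': ['fox'], 'h1': ['dog'], ...}
    best = 1.0
    for tag, words in important.items():
        if word in words:
            best = max(best, TAG_WEIGHTS.get(tag, 1.0))
    return best

important = {'title': ['fox'], 'h1': ['dog']}  # hypothetical example
print(get_tag_weight('fox', important))  # 2.0
print(get_tag_weight('cat', important))  # 1.0 (no boost)
```

One difference worth noting: the diff's loop keeps whichever matching tag comes last in `important`'s iteration order, while this sketch keeps the largest applicable boost.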