Compare commits

1 Commit

Author | SHA1 | Date
---|---|---
 | f5610eaa62 | 
Deleted test file (30 lines removed):

```diff
@@ -1,30 +0,0 @@
-# You can ignore this file. This was for testing purposes
-
-import json
-import os
-import shelve
-from bs4 import BeautifulSoup
-from time import perf_counter
-import requests
-
-from nltk.tokenize import word_tokenize
-from nltk.stem import PorterStemmer
-import numpy as np
-path_to_script = os.path.dirname(os.path.abspath(__file__))
-my_filename = os.path.join(path_to_script, "testfile.json")
-url = "https://www.crummy.com/software/BeautifulSoup/bs4/doc/"
-
-req = requests.get(url)
-file = open('D:/Visual Studio Workspace/CS121/assignment3/Search_Engine/testfile.json')
-content = json.load(file)
-soup = BeautifulSoup(content["content"], 'lxml')
-bold = []
-#print(soup.prettify())
-print(soup.findAll('h3'))
-for i in soup.findAll('title'):
-    print(word_tokenize(i.text))
-print(bold)
-
-
-
-
```
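The deleted file was a scratch script: fetch the BeautifulSoup docs, parse the HTML, and tokenize the text of selected tags. A minimal runnable sketch of the same experiment, assuming `requests`, `beautifulsoup4`, `lxml`, and nltk's `punkt` tokenizer data are installed (the hard-coded `D:/...` JSON path is dropped):

```python
# Minimal sketch of what the deleted scratch file exercised:
# fetch a page, parse it, and tokenize the text of chosen tags.
import requests
from bs4 import BeautifulSoup
from nltk.tokenize import word_tokenize

url = "https://www.crummy.com/software/BeautifulSoup/bs4/doc/"
soup = BeautifulSoup(requests.get(url).text, "lxml")

print(soup.find_all("h3"))           # same probe as the deleted file
for tag in soup.find_all("title"):
    print(word_tokenize(tag.text))   # tokens of the page title
```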
indexer.py (50 lines changed)
```diff
@@ -102,38 +102,24 @@ class Indexer():
             print("You have somehow went beyond the magic")
             return self.save_5
 
-    # I have a test file (mytest.py) with pandas but couldn't figure out how to grab just a single cell.
-    # so I came up with this, if anyone knows how to get a single cell and can explain it to
-    # me I would love to know, as I think that method might be quicker, maybe, idk it like
-    # 4am
+    # retuns a dict of words/n-grams with their assosiated tf-idf score *can also return just a single score or a pandas dataframe
     # https://stackoverflow.com/questions/34449127/sklearn-tfidf-transformer-how-to-get-tf-idf-values-of-given-words-in-documen
-    # Andy: added paramenter imporant_words in order to do multiplication of score
-    def get_tf_idf(self,words,word, important_words):
+    def get_tf_idf(self,words,word):
         #tf_idf
         #words = whole text
         #word the word we finding the score for
         #return the score
         try:
-            tfidf = TfidfVectorizer()
-            tfidf_matrix = tfidf.fit_transform(words)
-            df = pd.DataFrame(tfidf_matrix.toarray(), columns = tfidf.get_feature_names_out())
-            score = df.iloc[0][''.join(word)]
-            for k,v in important_words.items():
-                if k == 'b' and word in v:
-                    score = score * 1.2
-                elif k == 'h1' and word in v:
-                    score = score * 1.75
-                elif k == 'h2' and word in v:
-                    score = score * 1.5
-                elif k == 'h3' and word in v:
-                    score = score * 1.2
-                elif k == 'title' and word in v:
-                    score = score * 2
-            return(score)
-            #print(df)
-        except KeyError:
-            return -1
+            tfidf = TfidfVectorizer(ngram_range=(1,3)) # ngram_range is range of n-values for different n-grams to be extracted (1,3) gets unigrams, bigrams, trigrams
+            tfidf_matrix = tfidf.fit_transform(words) # fit trains the model, transform creates matrix
+            df = pd.DataFrame(tfidf_matrix.toarray(), columns = tfidf.get_feature_names_out()) # store value of matrix to associated word/n-gram
+            #return(df.iloc[0][''.join(word)]) #used for finding single word in dataset
+            data = df.to_dict() # transform dataframe to dict *could be expensive the larger the data gets, tested on ~1000 word doc and took 0.002 secs to run
+            return data # returns the dict of words/n-grams with tf-idf
+            #print(df) # debugging
+        except:
+            print("Error in tf_idf!")
+            return
 
 
     def get_data(self):
```
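The reworked `get_tf_idf` no longer computes one multiplied score per word; it vectorizes the whole text into unigrams through trigrams and returns every score in a dict via `df.to_dict()`. A standalone sketch of that flow, with a toy corpus standing in for the indexer's documents (it also shows the single-cell lookup the old comments were asking about):

```python
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer

docs = ["the quick brown fox", "the quick red fox"]   # stand-in documents

tfidf = TfidfVectorizer(ngram_range=(1, 3))  # unigrams, bigrams, trigrams
matrix = tfidf.fit_transform(docs)           # learn vocabulary, build matrix
df = pd.DataFrame(matrix.toarray(), columns=tfidf.get_feature_names_out())

data = df.to_dict()          # {ngram: {doc_index: tf-idf}}, what the new code returns
print(data["quick brown"])   # scores of one bigram across both documents
print(df.iloc[0]["quick brown"])  # single cell: one n-gram in one document
```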
```diff
@@ -149,15 +135,6 @@ class Indexer():
             data = json.load(file_load)
             soup = BeautifulSoup(data["content"],from_encoding=data["encoding"])
             words = word_tokenize(soup.get_text())
-
-            #getting important tokens
-            important = {'b' : [], 'h1' : [], 'h2' : [], 'h3' : [], 'title' : []}
-            for type in important.keys():
-                for i in soup.findAll(type):
-                    for word in word_tokenize(i.text):
-                        important[type].append(self.stemmer.stem(word))
-
-
             toc = perf_counter()
             if toc - tic > 1 :
                 print("Took " + str(toc - tic) + "seconds to tokenize text !")
```
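This hunk deletes the harvesting of "important" tokens that fed the tag-based multipliers removed in the previous hunk. For reference, a compact standalone sketch of the whole removed pattern; the `WEIGHTS` dict and `boost` helper are my names, but the multiplier values (b: 1.2, h1: 1.75, h2: 1.5, h3: 1.2, title: 2) come straight from the old code:

```python
from bs4 import BeautifulSoup
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize

# multipliers as in the removed get_tf_idf
WEIGHTS = {'b': 1.2, 'h1': 1.75, 'h2': 1.5, 'h3': 1.2, 'title': 2}

html = "<html><title>Search Engines</title><h1>Indexing</h1><b>indexing</b></html>"
soup = BeautifulSoup(html, "lxml")
stemmer = PorterStemmer()

# collect stemmed tokens from each weight-bearing tag
important = {tag: [] for tag in WEIGHTS}
for tag in important:
    for element in soup.find_all(tag):
        for token in word_tokenize(element.text):
            important[tag].append(stemmer.stem(token))

def boost(word, score):
    # multiply the score once per tag whose text contains the stemmed word
    for tag, words in important.items():
        if word in words:
            score *= WEIGHTS[tag]
    return score

print(boost('index', 1.0))  # ~2.1 (1.2 * 1.75): found in <b> and <h1>
```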
```diff
@@ -189,8 +166,7 @@ class Indexer():
             for word in stemmed_words:
                 #posting = Posting(data["url"],self.get_tf_idf(list(' '.join(stemmed_words)),word))
                 tic = perf_counter()
-                #added argument important
-                posting = Posting(data["url"],self.tf_idf_raw(stemmed_words,word, important))
+                posting = Posting(data["url"],self.tf_idf_raw(stemmed_words,word))
                 toc = perf_counter()
                 if toc - tic > 1 :
                     print("Took " + str(toc - tic) + "seconds to tf_idf text !")
```
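Unchanged around this hunk, the `perf_counter` guard logs any call that takes over a second. A minimal sketch of that timing pattern, with a `sleep` standing in for the expensive call:

```python
from time import perf_counter, sleep

tic = perf_counter()
sleep(1.2)               # stand-in for tokenizing or tf-idf work
toc = perf_counter()
if toc - tic > 1:
    print("Took " + str(toc - tic) + " seconds!")
```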
mytest.py (10 lines changed)
```diff
@@ -4,6 +4,7 @@ from sklearn.feature_extraction.text import TfidfVectorizer
 import pandas as pd
 import numpy as np
 
+
 #tf_idf
 #words = whole text
 #word the word we finding the score for
@@ -19,13 +20,12 @@ words = ['this is the first document '
 doc1 = ["I can't fucking take it any more. Among Us has singlehandedly ruined my life. The other day my teacher was teaching us Greek Mythology and he mentioned a pegasus and I immediately thought 'Pegasus? more like Mega Sus!!!!' and I've never wanted to kms more. I can't look at a vent without breaking down and fucking crying. I can't eat pasta without thinking 'IMPASTA??? THATS PRETTY SUS!!!!' Skit 4 by Kanye West. The lyrics ruined me. A Mongoose, or the 25th island of greece. The scientific name for pig. I can't fucking take it anymore. Please fucking end my suffering."]
 doc2 = ["Anyways, um... I bought a whole bunch of shungite rocks, do you know what shungite is? Anybody know what shungite is? No, not Suge Knight, I think he's locked up in prison. I'm talkin' shungite. Anyways, it's a two billion year-old like, rock stone that protects against frequencies and unwanted frequencies that may be traveling in the air. That's my story, I bought a whole bunch of stuff. Put 'em around the la casa. Little pyramids, stuff like that."]
 word = 'life'
 
 try:
-    tfidf = TfidfVectorizer()
-    tfidf_matrix = tfidf.fit_transform(doc1)
+    tfidf = TfidfVectorizer(ngram_range=(3,3)) # ngram_range is range of n-values for different n-grams to be extracted (1,3) gets unigrams, bigrams, trigrams
+    tfidf_matrix = tfidf.fit_transform(words)
     df = pd.DataFrame(tfidf_matrix.toarray(), columns = tfidf.get_feature_names_out())
-    print(df.iloc[0][''.join(word)])
-    #print(df)
+    #print(df.iloc[0][''.join(word)])
+    data = df.to_dict()
 except KeyError: # word does not exist
     print(-1)
-
```
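Note that `mytest.py` now passes `ngram_range=(3,3)`, which extracts trigrams only, while the comment copied over from indexer.py still describes `(1,3)`. A quick sketch of the difference:

```python
from sklearn.feature_extraction.text import TfidfVectorizer

doc = ["this is the first document"]

trigrams_only = TfidfVectorizer(ngram_range=(3, 3))
trigrams_only.fit_transform(doc)
print(trigrams_only.get_feature_names_out())
# ['is the first' 'the first document' 'this is the']

uni_to_tri = TfidfVectorizer(ngram_range=(1, 3))
uni_to_tri.fit_transform(doc)
print(uni_to_tri.get_feature_names_out())
# adds the unigrams and bigrams as well
```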
File diff suppressed because one or more lines are too long