Compare commits
2 Commits
Author | SHA1 | Date
---|---|---
 | efb2c4e2a8 |
 | c616b37432 |
importanttext.py | 30 | Normal file
@@ -0,0 +1,30 @@
+# You can ignore this file. This was for testing purposes
+
+import json
+import os
+import shelve
+from bs4 import BeautifulSoup
+from time import perf_counter
+import requests
+
+from nltk.tokenize import word_tokenize
+from nltk.stem import PorterStemmer
+import numpy as np
+path_to_script = os.path.dirname(os.path.abspath(__file__))
+my_filename = os.path.join(path_to_script, "testfile.json")
+url = "https://www.crummy.com/software/BeautifulSoup/bs4/doc/"
+
+req = requests.get(url)
+file = open('D:/Visual Studio Workspace/CS121/assignment3/Search_Engine/testfile.json')
+content = json.load(file)
+soup = BeautifulSoup(content["content"], 'lxml')
+bold = []
+#print(soup.prettify())
+print(soup.findAll('h3'))
+for i in soup.findAll('title'):
+    print(word_tokenize(i.text))
+print(bold)
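A side note on this test file: bold is initialized but never populated, so the final print(bold) always prints an empty list. If the intent was to collect bold text the way the indexer.py changes below do for their important-tags dict, a minimal sketch of the missing step (reusing the soup, bold, and word_tokenize names from the file above) might look like:

    # Hypothetical completion of the test: collect tokenized <b> text into bold.
    for tag in soup.findAll('b'):
        bold.extend(word_tokenize(tag.text))
    print(bold)  # tokens from <b> tags, if the page has any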
indexer.py | 50
@@ -102,24 +102,38 @@ class Indexer():
         print("You have somehow went beyond the magic")
         return self.save_5
 
     # returns a dict of words/n-grams with their associated tf-idf score; can also return just a single score or a pandas dataframe
     # I have a test file (mytest.py) with pandas but couldn't figure out how to grab just a single cell,
     # so I came up with this. If anyone knows how to get a single cell and can explain it to
     # me I would love to know, as I think that method might be quicker; maybe, idk, it's like
     # 4am
     # https://stackoverflow.com/questions/34449127/sklearn-tfidf-transformer-how-to-get-tf-idf-values-of-given-words-in-documen
-    def get_tf_idf(self, words, word):
+    # Andy: added parameter important_words in order to do multiplication of the score
+    def get_tf_idf(self, words, word, important_words):
         #tf_idf
         #words = whole text
         #word = the word we are finding the score for
         #return the score
         try:
-            tfidf = TfidfVectorizer(ngram_range=(1,3)) # ngram_range is the range of n-values for n-grams to be extracted; (1,3) gets unigrams, bigrams, trigrams
-            tfidf_matrix = tfidf.fit_transform(words) # fit trains the model, transform creates the matrix
-            df = pd.DataFrame(tfidf_matrix.toarray(), columns=tfidf.get_feature_names_out()) # store matrix values keyed by word/n-gram
-            #return(df.iloc[0][''.join(word)]) # used for finding a single word in the dataset
-            data = df.to_dict() # transform dataframe to dict; could be expensive as the data grows (tested on a ~1000 word doc, took 0.002 secs)
-            return data # returns the dict of words/n-grams with tf-idf
-            #print(df) # debugging
-        except:
-            print("Error in tf_idf!")
-            return
+            tfidf = TfidfVectorizer()
+            tfidf_matrix = tfidf.fit_transform(words)
+            df = pd.DataFrame(tfidf_matrix.toarray(), columns=tfidf.get_feature_names_out())
+            score = df.iloc[0][''.join(word)]
+            for k, v in important_words.items():
+                if k == 'b' and word in v:
+                    score = score * 1.2
+                elif k == 'h1' and word in v:
+                    score = score * 1.75
+                elif k == 'h2' and word in v:
+                    score = score * 1.5
+                elif k == 'h3' and word in v:
+                    score = score * 1.2
+                elif k == 'title' and word in v:
+                    score = score * 2
+            return score
+            #print(df)
+        except KeyError:
+            return -1
 
 
     def get_data(self):
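A note on the "single cell" question in the comment block above: pandas does have direct scalar lookups, so the to_dict() pass is avoidable. A minimal sketch, assuming the same tfidf, tfidf_matrix, and df as in the code above, and a word that actually appears in the vocabulary:

    # Grab one tf-idf value straight from the DataFrame.
    score = df.at[0, word]   # scalar accessor: row label 0, column label word
    score = df.loc[0, word]  # label-based lookup, same value

    # Or skip the DataFrame entirely: vocabulary_ maps each term to its
    # column index in the sparse matrix produced by fit_transform.
    col = tfidf.vocabulary_[word]  # raises KeyError for unseen words
    score = tfidf_matrix[0, col]

Either form avoids materializing the whole matrix as a dict, which is the step the comment flags as potentially expensive.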
@@ -135,6 +149,15 @@ class Indexer():
             data = json.load(file_load)
             soup = BeautifulSoup(data["content"], from_encoding=data["encoding"])
             words = word_tokenize(soup.get_text())
+
+            # getting important tokens
+            important = {'b': [], 'h1': [], 'h2': [], 'h3': [], 'title': []}
+            for type in important.keys():
+                for i in soup.findAll(type):
+                    for word in word_tokenize(i.text):
+                        important[type].append(self.stemmer.stem(word))
+
+
             toc = perf_counter()
             if toc - tic > 1:
                 print("Took " + str(toc - tic) + " seconds to tokenize text!")
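The elif chain in get_tf_idf and the important dict built here use the same five tag names, so the multipliers could live in a single table instead of being hard-coded twice. A hedged sketch of that refactor, with the multipliers copied from this diff (WEIGHTS and weighted_score are hypothetical names, not in the repo):

    # Tag weights matching the chain in get_tf_idf.
    WEIGHTS = {'b': 1.2, 'h1': 1.75, 'h2': 1.5, 'h3': 1.2, 'title': 2.0}

    def weighted_score(score, word, important_words, weights=WEIGHTS):
        # Mirrors the original loop: every tag list containing the word
        # contributes its multiplier (a word in both <b> and <title> is
        # boosted twice, as in the elif chain above).
        for tag, tag_words in important_words.items():
            if word in tag_words:
                score *= weights.get(tag, 1.0)
        return score

One other small point: the loop variable "type" in the added code shadows the built-in type(); a name like tag would avoid that.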
@@ -166,7 +189,8 @@ class Indexer():
             for word in stemmed_words:
                 #posting = Posting(data["url"], self.get_tf_idf(list(' '.join(stemmed_words)), word))
                 tic = perf_counter()
-                posting = Posting(data["url"], self.tf_idf_raw(stemmed_words, word))
+                # added argument important
+                posting = Posting(data["url"], self.tf_idf_raw(stemmed_words, word, important))
                 toc = perf_counter()
                 if toc - tic > 1:
                     print("Took " + str(toc - tic) + " seconds to tf_idf text!")
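The tic/toc-with-threshold pattern now appears twice in this file (tokenizing and tf-idf). A small context manager could factor it out; a sketch using only the standard library (timed is a hypothetical helper, not in the repo):

    from contextlib import contextmanager
    from time import perf_counter

    @contextmanager
    def timed(label, threshold=1.0):
        # Print a warning only when the wrapped block runs longer than
        # threshold seconds, like the checks in indexer.py.
        tic = perf_counter()
        yield
        toc = perf_counter()
        if toc - tic > threshold:
            print(f"Took {toc - tic:.2f} seconds to {label}!")

    # usage:
    # with timed("tf_idf text"):
    #     posting = Posting(data["url"], self.tf_idf_raw(stemmed_words, word, important))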
mytest.py | 10
@@ -4,7 +4,6 @@ from sklearn.feature_extraction.text import TfidfVectorizer
 import pandas as pd
 import numpy as np
-
 
 #tf_idf
 #words = whole text
 #word = the word we are finding the score for
@@ -20,12 +19,13 @@ words = ['this is the first document '
 doc1 = ["I can't fucking take it any more. Among Us has singlehandedly ruined my life. The other day my teacher was teaching us Greek Mythology and he mentioned a pegasus and I immediately thought 'Pegasus? more like Mega Sus!!!!' and I've never wanted to kms more. I can't look at a vent without breaking down and fucking crying. I can't eat pasta without thinking 'IMPASTA??? THATS PRETTY SUS!!!!' Skit 4 by Kanye West. The lyrics ruined me. A Mongoose, or the 25th island of greece. The scientific name for pig. I can't fucking take it anymore. Please fucking end my suffering."]
 doc2 = ["Anyways, um... I bought a whole bunch of shungite rocks, do you know what shungite is? Anybody know what shungite is? No, not Suge Knight, I think he's locked up in prison. I'm talkin' shungite. Anyways, it's a two billion year-old like, rock stone that protects against frequencies and unwanted frequencies that may be traveling in the air. That's my story, I bought a whole bunch of stuff. Put 'em around the la casa. Little pyramids, stuff like that."]
 word = 'life'
 
 try:
-    tfidf = TfidfVectorizer(ngram_range=(3,3)) # ngram_range is the range of n-values for n-grams to be extracted; (1,3) gets unigrams, bigrams, trigrams
-    tfidf_matrix = tfidf.fit_transform(words)
+    tfidf = TfidfVectorizer()
+    tfidf_matrix = tfidf.fit_transform(doc1)
     df = pd.DataFrame(tfidf_matrix.toarray(), columns=tfidf.get_feature_names_out())
     #print(df.iloc[0][''.join(word)])
     data = df.to_dict()
+    print(df.iloc[0][''.join(word)])
     #print(df)
 except KeyError: # word does not exist
     print(-1)
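Two details this test leans on, for anyone reading along: ''.join(word) is a no-op when word is already a string, and looking up a term the vectorizer never saw raises KeyError, which the except turns into -1. A quick check under the same setup as above:

    assert ''.join('life') == 'life'  # joining a string's characters gives the string back

    # df.iloc[0] is a Series indexed by feature names, so plain key lookup works:
    print(df.iloc[0]['life'])      # tf-idf score of 'life' in doc1
    df.iloc[0]['notaword']         # raises KeyError, which the except prints as -1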
testfile.json | 1 | Normal file
File diff suppressed because one or more lines are too long