Merge remote-tracking branch 'origin/posting'
commit 5c703b6471

importanttext.py  30  Normal file
@@ -0,0 +1,30 @@
# You can ignore this file. This was for testing purposes

import json
import os
import shelve
from bs4 import BeautifulSoup
from time import perf_counter
import requests

from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
import numpy as np
path_to_script = os.path.dirname(os.path.abspath(__file__))
my_filename = os.path.join(path_to_script, "testfile.json")
url = "https://www.crummy.com/software/BeautifulSoup/bs4/doc/"

req = requests.get(url)
file = open('D:/Visual Studio Workspace/CS121/assignment3/Search_Engine/testfile.json')
content = json.load(file)
soup = BeautifulSoup(content["content"], 'lxml')
bold = []
#print(soup.prettify())
print(soup.findAll('h3'))
for i in soup.findAll('title'):
    print(word_tokenize(i.text))
print(bold)
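The test script above exercises the same extraction that the indexer.py changes below rely on: pull the text out of "important" tags with BeautifulSoup and tokenize it with NLTK. A minimal, self-contained sketch of that idea, using a made-up HTML string instead of testfile.json (assumption: nltk's punkt tokenizer data is installed, e.g. via nltk.download('punkt')):

from bs4 import BeautifulSoup
from nltk.tokenize import word_tokenize

# Made-up HTML standing in for the "content" field of testfile.json.
html = "<html><head><title>Sample Page</title></head><body><h3>A Heading</h3><b>bold words</b></body></html>"
soup = BeautifulSoup(html, "html.parser")

# Print the tokenized text of each tag we consider "important".
for tag in ("title", "h3", "b"):
    for element in soup.find_all(tag):
        print(tag, word_tokenize(element.text))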
indexer.py  78
@@ -135,7 +135,9 @@ class Indexer():
    # me I would love to know, as I think that method might be quicker, maybe, idk it like
    # 4am
    # https://stackoverflow.com/questions/34449127/sklearn-tfidf-transformer-how-to-get-tf-idf-values-of-given-words-in-documen
-    def get_tf_idf(self,words,word):
+
+    # Andy: added parameter important_words in order to do multiplication of score
+    def get_tf_idf(self,words,word, important_words):
        #tf_idf
        #words = whole text
        #word the word we finding the score for
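For context, the new important_words argument is expected to be a dict keyed by tag name ('b', 'h1', 'h2', 'h3', 'title') whose values are the stemmed words found inside that tag; it is built later in this commit in the indexing hunk below. The sample values here are made up purely for illustration:

# Hypothetical example of the shape of important_words.
important_words = {
    'b': ['fast'],
    'h1': ['search'],
    'h2': [],
    'h3': [],
    'title': ['search', 'engin'],
}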
@@ -144,7 +146,19 @@ class Indexer():
            tfidf = TfidfVectorizer()
            tfidf_matrix = tfidf.fit_transform(words)
            df = pd.DataFrame(tfidf_matrix.toarray(), columns = tfidf.get_feature_names_out())
-            return(df.iloc[0][''.join(word)])
+            score = df.iloc[0][''.join(word)]
+            for k,v in important_words.items():
+                if k == 'b' and word in v:
+                    score = score * 1.2
+                elif k == 'h1' and word in v:
+                    score = score * 1.75
+                elif k == 'h2' and word in v:
+                    score = score * 1.5
+                elif k == 'h3' and word in v:
+                    score = score * 1.2
+                elif k == 'title' and word in v:
+                    score = score * 2
+            return(score)
            #print(df)
        except KeyError:
            return -1
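The tag-weighting step added above can be read as a standalone function. This is a minimal sketch under the same multipliers as the diff (b 1.2, h1 1.75, h2 1.5, h3 1.2, title 2), with boosts compounding when a word appears in several important tags; apply_tag_weights and TAG_WEIGHTS are hypothetical names, not part of the repository.

TAG_WEIGHTS = {'b': 1.2, 'h1': 1.75, 'h2': 1.5, 'h3': 1.2, 'title': 2.0}

def apply_tag_weights(score, word, important_words):
    # important_words: dict of tag name -> stemmed words seen in that tag
    for tag, words_in_tag in important_words.items():
        if word in words_in_tag:
            score *= TAG_WEIGHTS.get(tag, 1.0)
    return score

# e.g. a word with base score 0.4 found in both the title and an h1
# comes out as 0.4 * 2 * 1.75 = 1.4 under this scheme.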
@@ -183,6 +197,66 @@ class Indexer():
        #Found 55770 documents
        #
+
+                ticker = perf_counter()
+                tic = perf_counter()
+                file_load = open(self.path + "/" + directory + "/"+file)
+                data = json.load(file_load)
+                soup = BeautifulSoup(data["content"],from_encoding=data["encoding"])
+                words = word_tokenize(soup.get_text())
+
+                #getting important tokens
+                important = {'b' : [], 'h1' : [], 'h2' : [], 'h3' : [], 'title' : []}
+                for type in important.keys():
+                    for i in soup.findAll(type):
+                        for word in word_tokenize(i.text):
+                            important[type].append(self.stemmer.stem(word))
+
+                toc = perf_counter()
+                if toc - tic > 1 :
+                    print("Took " + str(toc - tic) + "seconds to tokenize text !")
+
+                tokenized_words = list()
+                stemmed_words = list()
+
+                tic = perf_counter()
+                for word in words:
+                    if word != "" and re.fullmatch('[A-Za-z0-9]+',word):
+                        #So all the tokenized words are here,
+                        tokenized_words.append(word)
+                toc = perf_counter()
+                if toc - tic > 1 :
+                    print("Took " + str(toc - tic) + "seconds to isalnum text !")
+                #YOUR CODE HERE
+
+                tic = perf_counter()
+                for word in tokenized_words:
+                    stemmed_words.append(self.stemmer.stem(word))
+                    #stemming,
+                    #tf_idf
+                    #get_tf_idf(stemmed_words,word)
+                    #post = Posting()
+                toc = perf_counter()
+                if toc - tic > 1 :
+                    print("Took " + str(toc - tic) + "seconds to stemmed text !")
+
+                for word in stemmed_words:
+                    #posting = Posting(data["url"],self.get_tf_idf(list(' '.join(stemmed_words)),word))
+                    tic = perf_counter()
+                    #added argument important
+                    posting = Posting(data["url"],self.tf_idf_raw(stemmed_words,word, important))
+                    toc = perf_counter()
+                    if toc - tic > 1 :
+                        print("Took " + str(toc - tic) + "seconds to tf_idf text !")
+
+                    tic = perf_counter()
+                    self.save_index(word,posting)
+                    toc = perf_counter()
+                    if toc - tic > 1 :
+                        print("Took " + str(toc - tic) + "seconds to save text !")
+
+                tocker = perf_counter()
+                print("Finished " + data['url'] + " in \t " + str(tocker-ticker))
+
    def tf_idf_raw(self,words,word):
        tf_times = words.count(word)
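The per-word loop above calls tf_idf_raw once for every stemmed token, and tf_idf_raw starts from the raw count words.count(word). A hedged sketch of that counting step, done once per document with collections.Counter (my choice for the sketch, not what the commit does), so every word's frequency comes from a single pass:

from collections import Counter

def raw_term_frequencies(words):
    # words: list of stemmed tokens for a single document
    return Counter(words)

# raw_term_frequencies(["search", "engin", "search"])["search"] == 2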
testfile.json  1  Normal file
File diff suppressed because one or more lines are too long