Compare commits
8 Commits
tf_idf ... Lacerum-pa
SHA1 |
---|
e7c4170cc2 |
c4b3512df7 |
c8640001c7 |
f1fe3b26ac |
5c703b6471 |
c892bbac03 |
efb2c4e2a8 |
c616b37432 |
30  importanttext.py  Normal file
@@ -0,0 +1,30 @@
# You can ignore this file. This was for testing purposes

import json
import os
import shelve
from bs4 import BeautifulSoup
from time import perf_counter
import requests

from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
import numpy as np
path_to_script = os.path.dirname(os.path.abspath(__file__))
my_filename = os.path.join(path_to_script, "testfile.json")
url = "https://www.crummy.com/software/BeautifulSoup/bs4/doc/"

req = requests.get(url)
file = open('D:/Visual Studio Workspace/CS121/assignment3/Search_Engine/testfile.json')
content = json.load(file)
soup = BeautifulSoup(content["content"], 'lxml')
bold = []
#print(soup.prettify())
print(soup.findAll('h3'))
for i in soup.findAll('title'):
    print(word_tokenize(i.text))
print(bold)
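A side note on importanttext.py above: it builds a portable `my_filename` from the script's own directory but then opens a hard-coded `D:/...` path, so the test only runs on that one machine. Below is a minimal sketch of the portable alternative, reusing the `my_filename` the script already computes; this is an illustration on my part, not part of the diff, and it assumes `testfile.json` sits next to the script.

```python
# Illustrative only: load testfile.json relative to the script instead of a
# hard-coded absolute path. Assumes testfile.json sits next to this script.
import json
import os

path_to_script = os.path.dirname(os.path.abspath(__file__))
my_filename = os.path.join(path_to_script, "testfile.json")

with open(my_filename, encoding="utf-8") as fh:  # 'with' also closes the file handle
    content = json.load(fh)
```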
147  indexer.py
@@ -15,7 +15,8 @@ import os
 import shelve
 from bs4 import BeautifulSoup
 from time import perf_counter
+import time
+import threading


 #Data process
@@ -29,6 +30,7 @@ import re

 #Logging postings
 from posting import Posting
+from worker import Worker


 class Indexer():
@@ -61,16 +63,27 @@ class Indexer():


         self.save_1 = shelve.open("save_1.shelve")
+        self.save_1_lock = threading.Lock()
         self.save_2 = shelve.open("save_2.shelve")
+        self.save_2_lock = threading.Lock()
         self.save_3 = shelve.open("save_3.shelve")
+        self.save_3_lock = threading.Lock()
         self.save_4 = shelve.open("save_4.shelve")
+        self.save_4_lock = threading.Lock()
         self.save_5 = shelve.open("save_5.shelve")
+        self.save_5_lock = threading.Lock()

+        print(len(list(self.save_1.keys())))
+        print(len(list(self.save_2.keys())))
+        print(len(list(self.save_3.keys())))
+        print(len(list(self.save_4.keys())))
+        print(len(list(self.save_5.keys())))

     def save_index(self,word,posting):
         cur_save = self.get_save_file(word)
+        lock = self.get_save_lock(word)
+        lock.acquire()
         shelve_list = list()

         try:
             shelve_list = cur_save[word]
             shelve_list.append(posting)
@@ -80,10 +93,12 @@ class Indexer():
             if toc - tic > 1 :
                 print("Took " + str(toc - tic) + "seconds to sort shelve list !")
             cur_save.sync()
+            lock.release()
         except:
             shelve_list.append(posting)
             cur_save[word] = shelve_list
             cur_save.sync()
+            lock.release()

     def get_save_file(self,word):
         #return the correct save depending on the starting letter of word
@@ -102,20 +117,64 @@ class Indexer():
             print("You have somehow went beyond the magic")
             return self.save_5

+    def get_save_lock(self,word):
+        word_lower = word.lower()
+        if re.match(r"^[a-d0-1].*",word_lower):
+            return self.save_1_lock
+        elif re.match(r"^[e-k2-3].*",word_lower):
+            return self.save_2_lock
+        elif re.match(r"^[l-q4-7].*",word_lower):
+            return self.save_3_lock
+        elif re.match(r"^[r-z8-9].*",word_lower):
+            return self.save_4_lock
+        else:
+            print(word)
+            print("You have somehow went beyond the magic")
+            return self.save_5_lock.acquire()

+    # I have a test file (mytest.py) with pandas but couldn't figure out how to grab just a single cell.
+    # so I came up with this, if anyone knows how to get a single cell and can explain it to
+    # me I would love to know, as I think that method might be quicker, maybe, idk it like
+    # 4am

     # retuns a dict of words/n-grams with their assosiated tf-idf score *can also return just a single score or a pandas dataframe
     # https://stackoverflow.com/questions/34449127/sklearn-tfidf-transformer-how-to-get-tf-idf-values-of-given-words-in-documen
-    def get_tf_idf(self,words,word):
+    # Andy: added paramenter imporant_words in order to do multiplication of score
+    def get_tf_idf(self,words,word, important_words):
         #tf_idf
         #words = whole text
         #word the word we finding the score for
         #return the score
+        try:
+        '''
+        tfidf = TfidfVectorizer()
+        tfidf_matrix = tfidf.fit_transform(words)
+        df = pd.DataFrame(tfidf_matrix.toarray(), columns = tfidf.get_feature_names_out())
+        score = df.iloc[0][''.join(word)]
+        for k,v in important_words.items():
+            if k == 'b' and word in v:
+                score = score * 1.2
+            elif k == 'h1' and word in v:
+                score = score * 1.75
+            elif k == 'h2' and word in v:
+                score = score * 1.5
+            elif k == 'h3' and word in v:
+                score = score * 1.2
+            elif k == 'title' and word in v:
+                score = score * 2
+        return(score)
+        #print(df)
+        except KeyError:
+            return -1
+        '''
         try:
             tfidf = TfidfVectorizer(ngram_range=(1,3)) # ngram_range is range of n-values for different n-grams to be extracted (1,3) gets unigrams, bigrams, trigrams
             tfidf_matrix = tfidf.fit_transform(words) # fit trains the model, transform creates matrix
             df = pd.DataFrame(tfidf_matrix.toarray(), columns = tfidf.get_feature_names_out()) # store value of matrix to associated word/n-gram
             #return(df.iloc[0][''.join(word)]) #used for finding single word in dataset
-            data = df.to_dict() # transform dataframe to dict *could be expensive the larger the data gets, tested on ~1000 word doc and took 0.002 secs to run
-            return data # returns the dict of words/n-grams with tf-idf
+            tfidf_dict = df.to_dict() # transform dataframe to dict *could be expensive the larger the data gets, tested on ~1000 word doc and took 0.002 secs to run
+            return tfidf_dict # returns the dict of words/n-grams with tf-idf as value
             #print(df) # debugging
         except:
             print("Error in tf_idf!")
@@ -123,69 +182,41 @@ class Indexer():


     def get_data(self):

+        num_threads = 1
+        threads = list()

         for directory in os.listdir(self.path):
             for file in os.listdir(self.path + "/" + directory + "/"):
                 #Actual files here
                 #JSON["url"] = url of crawled page, ignore fragments
                 #JSON["content"] = actual HTML
                 #JSON["encoding"] = ENCODING
-                ticker = perf_counter()
-                tic = perf_counter()
-                file_load = open(self.path + "/" + directory + "/"+file)
-                data = json.load(file_load)
-                soup = BeautifulSoup(data["content"],from_encoding=data["encoding"])
-                words = word_tokenize(soup.get_text())
-                toc = perf_counter()
-                if toc - tic > 1 :
-                    print("Took " + str(toc - tic) + "seconds to tokenize text !")
-                tokenized_words = list()
-                stemmed_words = list()
-
-                tic = perf_counter()
-                for word in words:
-                    if word != "" and re.fullmatch('[A-Za-z0-9]+',word):
-                        #So all the tokenized words are here,
-                        tokenized_words.append(word)
-                toc = perf_counter()
-                if toc - tic > 1 :
-                    print("Took " + str(toc - tic) + "seconds to isalnum text !")
-                #YOUR CODE HERE
-
-                tic = perf_counter()
-                for word in tokenized_words:
-                    stemmed_words.append(self.stemmer.stem(word))
-                #stemming,
-                #tf_idf
-                #get_tf_idf(stemmed_words,word)
-                #post = Posting()
-                toc = perf_counter()
-                if toc - tic > 1 :
-                    print("Took " + str(toc - tic) + "seconds to stemmed text !")
-
-                for word in stemmed_words:
-                    #posting = Posting(data["url"],self.get_tf_idf(list(' '.join(stemmed_words)),word))
-                    tic = perf_counter()
-                    posting = Posting(data["url"],self.tf_idf_raw(stemmed_words,word))
-                    toc = perf_counter()
-                    if toc - tic > 1 :
-                        print("Took " + str(toc - tic) + "seconds to tf_idf text !")
-
-                    tic = perf_counter()
-                    self.save_index(word,posting)
-                    toc = perf_counter()
-                    if toc - tic > 1 :
-                        print("Took " + str(toc - tic) + "seconds to save text !")
-
-                tocker = perf_counter()
-                print("Finished " + data['url'] + " in \t " + str(tocker-ticker))
-
-    def tf_idf_raw(self,words,word):
-        tf_times = words.count(word)
-
-        tf = tf_times/len(words)
-
-        return tf
+                index = 0
+                while True:
+                    file_path = self.path + "" + directory + "/"+file
+                    if len(threads) < num_threads:
+                        thread = Worker(self,file_path)
+                        threads.append(thread)
+                        thread.start()
+                        break
+                    else:
+                        if not threads[index].is_alive():
+                            threads[index] = Worker(self,file_path)
+                            threads[index].start()
+                            break
+                        else:
+                            index = index + 1
+                            if(index >= num_threads):
+                                index = 0
+                                time.sleep(.1)
+
+        #Found 55770 documents
+        #
+
+        #getting important tokens
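The indexer.py changes above split the on-disk index across five shelve files chosen by the first character of the word, and pair each shelve with a `threading.Lock` so several Worker threads can call `save_index` concurrently. One detail worth noting from the diff: the fallback branch of `get_save_lock` does `return self.save_5_lock.acquire()`, which returns the boolean result of `acquire()` rather than the lock object, so the later `lock.release()` would fail on that path. Below is a minimal sketch of the same partition-plus-lock idea; the names `PartitionedIndex` and `_bucket` are illustrative, not the repo's API, and it uses the lock as a context manager so it is always released.

```python
# Hedged sketch of the partitioned-shelve idea from the diff above; class and
# helper names are illustrative. The alphabetical buckets mirror save_1..save_5.
import re
import shelve
import threading

class PartitionedIndex:
    def __init__(self):
        # one shelve file plus one lock per bucket
        self.buckets = [
            (re.compile(r"^[a-d0-1]"), shelve.open("save_1.shelve"), threading.Lock()),
            (re.compile(r"^[e-k2-3]"), shelve.open("save_2.shelve"), threading.Lock()),
            (re.compile(r"^[l-q4-7]"), shelve.open("save_3.shelve"), threading.Lock()),
            (re.compile(r"^[r-z8-9]"), shelve.open("save_4.shelve"), threading.Lock()),
        ]
        self.fallback = (shelve.open("save_5.shelve"), threading.Lock())

    def _bucket(self, word):
        # pick the shelve and lock for this word's first character
        w = word.lower()
        for pattern, save, lock in self.buckets:
            if pattern.match(w):
                return save, lock
        return self.fallback

    def save_index(self, word, posting):
        save, lock = self._bucket(word)
        with lock:  # released even if an exception is raised
            postings = save.get(word, [])
            postings.append(posting)
            save[word] = postings
            save.sync()
```

Using `with lock:` also covers the failure path that `save_index` in the diff handles with its try/except plus a second `lock.release()`.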
1  testfile.json  Normal file
File diff suppressed because one or more lines are too long
114  worker.py  Normal file
@@ -0,0 +1,114 @@
from threading import Thread
import json
import os
import shelve
from bs4 import BeautifulSoup
from time import perf_counter
import time

import re


#Data process
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
import numpy as np
from collections import Counter

from posting import Posting


import sys

class Worker(Thread):
    def __init__(self,indexer,target):
        self.file = target
        self.indexer = indexer
        super().__init__(daemon=True)

    def run(self):
        print("Target: " + str(self.file))
        ticker = perf_counter()
        tic = perf_counter()
        file_load = open(self.file)
        data = json.load(file_load)
        soup = BeautifulSoup(data["content"],features="lxml")
        words = word_tokenize(soup.get_text())
        toc = perf_counter()
        if toc - tic > 1 :
            print("Took " + str(toc - tic) + "seconds to tokenize text !")

        tokenized_words = list()
        stemmed_words = list()

        important = {'b' : [], 'h1' : [], 'h2' : [], 'h3' : [], 'title' : []}
        for key_words in important.keys():
            for i in soup.findAll(key_words):
                for word in word_tokenize(i.text):
                    important[key_words].append(self.indexer.stemmer.stem(word))

        tic = perf_counter()
        for word in words:
            if word != "" and re.fullmatch('[A-Za-z0-9]+',word):
                tokenized_words.append(word)
        toc = perf_counter()
        if toc - tic > 1 :
            print("Took " + str(toc - tic) + "seconds to isalnum text !")

        tic = perf_counter()
        for word in tokenized_words:
            stemmed_words.append(self.indexer.stemmer.stem(word))

        toc = perf_counter()
        if toc - tic > 1 :
            print("Took " + str(toc - tic) + "seconds to stemmed text !")

        """
        tfidf = TfidfVectorizer(ngram_range=(1,3)) # ngram_range is range of n-values for different n-grams to be extracted (1,3) gets unigrams, bigrams, trigrams
        tfidf_matrix = tfidf.fit_transform(stemmed_words) # fit trains the model, transform creates matrix
        #df = pd.DataFrame(tfidf_matrix.toarray(), columns = tfidf.get_feature_names_out()) # store value of matrix to associated word/n-gram
        tfidf.sget_feature_names_out()
        #tf_idf_dict = df.to_dict() # transform dataframe to dict *could be expensive the larger the data gets, tested on ~1000 word doc and took 0.002 secs to run

        print(tfidf_matrix)
        """

        tfIdfVectorizer=TfidfVectorizer(use_idf=True)
        tfIdf = tfIdfVectorizer.fit_transform(stemmed_words)
        df = pd.DataFrame(tfIdf[0].T.todense(), index=tfIdfVectorizer.get_feature_names_out(), columns=["TF-IDF"])
        df = df.sort_values('TF-IDF', ascending=False)

        print(df.head(25))

        for word in tf_idf_dict.keys():
            tic = perf_counter()
            print(tf_idf_dict)
            weight = 1.0
            for k,v in important.items():
                if k == 'b' and word in v:
                    weight = 1.2
                elif k == 'h1' and word in v:
                    weight = 1.75
                elif k == 'h2' and word in v:
                    weight = 1.5
                elif k == 'h3' and word in v:
                    weight = 1.2
                elif k == 'title' and word in v:
                    weight = 2

            posting = Posting(data["url"],tf_idf_dict[word]*weight)

            toc = perf_counter()
            if toc - tic > 1 :
                print("Took " + str(toc - tic) + "seconds to tf_idf text !")

            tic = perf_counter()
            self.indexer.save_index(word,posting)
            toc = perf_counter()
            if toc - tic > 1 :
                print("Took " + str(toc - tic) + "seconds to save text !")

        tocker = perf_counter()
        print("Finished " + data['url'] + "\n" + str(tocker-ticker))
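worker.py above collects the stemmed text found inside `b`, `h1`, `h2`, `h3`, and `title` tags, scores terms with `TfidfVectorizer`, and then boosts a term's score when its stem appears in one of those tags, using an if/elif chain with weights 1.2, 1.75, 1.5, 1.2, and 2. (As committed, the loop iterates `tf_idf_dict`, which is only defined inside the triple-quoted block above it.) Below is a minimal sketch of that weighting step with the boosts kept in a dict; `TAG_WEIGHTS` and `boost_scores` are illustrative names of my own, the weights are the ones used in the diff, and the input is assumed to be a plain {term: score} mapping.

```python
# Hedged sketch of the tag-boost step from worker.py; names are illustrative.
TAG_WEIGHTS = {'b': 1.2, 'h1': 1.75, 'h2': 1.5, 'h3': 1.2, 'title': 2.0}

def boost_scores(tf_idf_dict, important):
    """important maps a tag name to the list of stemmed words found inside that tag."""
    boosted = {}
    for word, score in tf_idf_dict.items():
        weight = 1.0
        for tag, words_in_tag in important.items():
            if word in words_in_tag:
                # keep the largest applicable boost rather than the last match
                weight = max(weight, TAG_WEIGHTS.get(tag, 1.0))
        boosted[word] = score * weight
    return boosted

# example: a term that also appears in the title gets doubled
print(boost_scores({'beautiful': 0.4, 'soup': 0.1},
                   {'b': [], 'h1': [], 'h2': [], 'h3': [], 'title': ['beautiful']}))
```

Taking the maximum applicable boost is a design choice in this sketch; the committed if/elif chain keeps whichever matching tag comes last in the dict's iteration order.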