-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcorpus_loader.py
More file actions
96 lines (80 loc) · 4.07 KB
/
corpus_loader.py
File metadata and controls
96 lines (80 loc) · 4.07 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
from glob import glob
import os
from collections import Counter
from document_data import DocumentData
from tokenizer import Tokenizer
from trie import Trie
import ir_datasets
import pickle
class CorpusLoader:
    """Builds an inverted index (term -> {doc_id: term frequency}) plus per-document
    metadata from either a filesystem glob or an ir_datasets collection.

    The vocabulary is a dict mapping each token to a postings dict of
    {doc_id: frequency}; documents maps doc_id to a DocumentData record.
    """

    def __init__(self, tokenizer: Tokenizer) -> None:
        # Tokenizer supplies .tokenize(text) -> list[str]; injected so the
        # loader is tokenization-agnostic.
        self.tokenizer = tokenizer

    def load_from_path(self, path, current_vocabulary: dict[str, dict[int, int]], current_documents: dict[int, DocumentData]):
        """Index every file matching the glob pattern *path*.

        Results are cached in ``.cache/corpus.pickle``. On a cache hit the
        pickled pair is returned directly and the passed-in vocabulary and
        documents arguments are ignored (this matches the original behavior).

        Returns:
            The (vocabulary, documents) pair, updated in place on a cache miss.
        """
        cache_path = '.cache/corpus.pickle'
        if os.path.exists(cache_path):
            with open(cache_path, 'rb') as f:
                current_vocabulary, current_documents = pickle.load(f)
        else:
            # Continue numbering after any documents already present.
            doc_id = len(current_documents) + 1
            for filepath in glob(path):
                try:
                    with open(filepath, "r", encoding="utf-8") as file:
                        file_contents = file.read()
                except IsADirectoryError:
                    # Fix: skip directories. The original fell through here and
                    # tokenized a stale (or undefined) file_contents variable,
                    # duplicating the previous file under a new doc_id.
                    print("IsADirectoryError")
                    continue
                words = self.tokenizer.tokenize(file_contents)
                if len(words) > 0:
                    max_word_frequency = 0
                    for word in words:
                        postings = current_vocabulary.setdefault(word, {})
                        postings[doc_id] = postings.get(doc_id, 0) + 1
                        max_word_frequency = max(max_word_frequency, postings[doc_id])
                    doc_data = DocumentData(path, os.path.basename(filepath), doc_id, len(words), max_word_frequency)
                    current_documents[doc_id] = doc_data
                    doc_id += 1
            # Fix: the cache write crashed with FileNotFoundError when the
            # .cache directory did not exist yet.
            os.makedirs(os.path.dirname(cache_path), exist_ok=True)
            with open(cache_path, 'wb') as f:
                pickle.dump((current_vocabulary, current_documents), f)
        return current_vocabulary, current_documents

    def new_load_from_ir_datasets(self, dataset_name: str, current_vocabulary: dict[str, dict[int, int]], current_documents: dict[int, DocumentData]):
        """Index every document of the named ir_datasets collection.

        Documents with empty text (or that tokenize to nothing) are reported
        and skipped. Doc ids are taken from the dataset (coerced to int), so
        re-loading the same dataset overwrites rather than duplicates entries.

        Returns:
            The (vocabulary, documents) pair, updated in place.
        """
        dataset = ir_datasets.load(dataset_name)
        for doc in dataset.docs_iter():
            if doc.text == '':
                print('empty doc', doc.doc_id)
                continue
            words = self.tokenizer.tokenize(doc.text)
            if not words:
                # Fix: was `assert len(words) > 0` — asserts are stripped under
                # python -O; skip explicitly instead of crashing the whole load.
                print('no tokens in doc', doc.doc_id)
                continue
            doc_id = int(doc.doc_id)
            max_word_frequency = 0
            for word in words:
                postings = current_vocabulary.setdefault(word, {})
                postings[doc_id] = postings.get(doc_id, 0) + 1
                max_word_frequency = max(max_word_frequency, postings[doc_id])
            # Fix: the original wrapped this in a bare `except:` to handle
            # datasets whose docs lack a title; fall back explicitly instead of
            # swallowing every possible error from DocumentData construction.
            title = getattr(doc, 'title', doc_id)
            doc_data = DocumentData('', title, doc_id, len(words), max_word_frequency)
            current_documents[doc_id] = doc_data
        return current_vocabulary, current_documents