-
Notifications
You must be signed in to change notification settings - Fork 0
/
data_analysis_toolkit.py
48 lines (43 loc) · 1.87 KB
/
data_analysis_toolkit.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
import os
import random
from collections import Counter
from operator import itemgetter

from tqdm import tqdm

from tokenizer import SimpleGermanTokenizer
# TODO: can probably be deleted or should be replaced with nltk.FreqDist
def get_token_frequencies(directory):
    """Count how often each token occurs across all files under *directory*.

    Walks the directory tree recursively, tokenizes every file's full text
    with SimpleGermanTokenizer, and accumulates counts.

    Args:
        directory: Root directory whose files are read and tokenized.

    Returns:
        A Counter (dict subclass) mapping token -> occurrence count, so
        existing callers that expect a plain dict keep working.

    Raises:
        UnicodeDecodeError: if a file cannot be decoded with the default
            encoding (unlike load_documents, decode errors are not swallowed
            here — TODO confirm that is intended).
    """
    tokenizer = SimpleGermanTokenizer()
    # Counter replaces the hand-rolled if/else counting loop; this is the
    # stdlib equivalent of the nltk.FreqDist mentioned in the TODO above.
    token_frequencies = Counter()
    for dirpath, _dirnames, filenames in os.walk(directory):
        for filename in tqdm(filenames):
            with open(os.path.join(dirpath, filename), 'r') as file:
                token_frequencies.update(tokenizer.tokenize(file.read()))
    return token_frequencies
# TODO: can probably be deleted or should be replaced with nltk.FreqDist
def get_sorted_tokens(directory, n=None):
    """Return the tokens found under *directory* ranked by frequency.

    Args:
        directory: Root directory passed through to get_token_frequencies.
        n: If given, truncate the ranking to the top n entries; None keeps all.

    Returns:
        A list of (token, count) pairs sorted by descending count.
    """
    frequencies = get_token_frequencies(directory)
    ranked = sorted(frequencies.items(), key=lambda pair: pair[1], reverse=True)
    return ranked[:n]
def load_documents(file_names):
    """Read every file in *file_names* into memory, in random order.

    Args:
        file_names: Iterable of paths to text files.

    Returns:
        A tuple (documents, load_errors): documents is a shuffled list of
        file contents; load_errors lists the paths whose contents could not
        be decoded (those files are skipped, not raised).
    """
    documents = []
    load_errors = []
    for name in file_names:
        with open(name, 'r') as handle:
            # Decode errors surface on read(), not open(); record and move on.
            try:
                text = handle.read()
            except UnicodeDecodeError:
                load_errors.append(name)
            else:
                documents.append(text)
    random.shuffle(documents)
    return documents, load_errors
def get_files_from_folder(path):
    """Recursively collect the full path of every file under *path*.

    Args:
        path: Root directory to walk.

    Returns:
        A list of joined (dirpath, filename) paths in os.walk order.
    """
    collected = []
    for dirpath, _dirnames, filenames in os.walk(path):
        collected.extend(os.path.join(dirpath, name) for name in filenames)
    return collected
def extract_sentences(content, sentence_detector):
    """Split *content* into cleaned sentences, filtering out noise.

    A sentence survives only if it is not pure whitespace, does not start
    with '"http', is longer than 12 characters, and digits make up less
    than a quarter of its characters. Newlines inside surviving sentences
    are flattened to single spaces.

    Args:
        content: Raw text to segment.
        sentence_detector: Any object exposing tokenize(str) -> list[str]
            (e.g. an nltk punkt detector — TODO confirm against callers).

    Returns:
        A list of cleaned sentence strings.
    """
    kept = []
    for sentence in sentence_detector.tokenize(content):
        if sentence.isspace():
            continue
        if sentence[:5] == '"http':
            continue
        if len(sentence) <= 12:
            continue
        digit_count = sum(char.isdigit() for char in sentence)
        # Drop number-heavy fragments (tables, IDs): >= 25% digits.
        if 4 * digit_count >= len(sentence):
            continue
        kept.append(sentence.replace('\n', ' '))
    return kept