-
Notifications
You must be signed in to change notification settings - Fork 225
/
tag_phrases.py
executable file
·78 lines (63 loc) · 2.76 KB
/
tag_phrases.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
#!/usr/bin/env python
import argparse, os.path
import cPickle as pickle
import nltk.data, nltk.tag
from nltk_trainer import load_corpus_reader
from nltk_trainer.writer.chunked import ChunkedCorpusWriter
########################################
## command options & argument parsing ##
########################################
# TODO: many of the args are shared with analyze_classifier_coverage, so abstract
arg_parser = argparse.ArgumentParser(
    description='Classify a plaintext corpus to a classified corpus')
# TODO: make sure source_corpus can be a single file
arg_parser.add_argument('source_corpus',
    help='corpus name/path relative to an nltk_data directory')
arg_parser.add_argument('target_corpus',
    help='corpus name/path relative to an nltk_data directory')
arg_parser.add_argument('--trace', type=int, default=1,
    help='How much trace output you want, defaults to 1. 0 is no trace output.')
arg_parser.add_argument('--tagger', default=nltk.tag._POS_TAGGER,
    help='''pickled tagger filename/path relative to an nltk_data directory
default is NLTK's default tagger''')

# Options controlling how the source corpus is read and tokenized.
# TODO: from analyze_tagged_corpus.py
reader_group = arg_parser.add_argument_group('Corpus Reader Options')
reader_group.add_argument('--reader',
    default='nltk.corpus.reader.plaintext.PlaintextCorpusReader',
    help='Full module path to a corpus reader class, defaults to %(default)s.')
reader_group.add_argument('--fileids', default=None,
    help='Specify fileids to load from corpus')
reader_group.add_argument('--sent-tokenizer', default='tokenizers/punkt/english.pickle',
    help='Path to pickled sentence tokenizer')
reader_group.add_argument('--word-tokenizer', default='nltk.tokenize.WordPunctTokenizer',
    help='Full module path to a tokenizer class, defaults to %(default)s.')

# `args` is consumed by the rest of the script.
args = arg_parser.parse_args()
###################
## corpus reader ##
###################
# Load the source corpus through the configured reader class and tokenizers.
source_corpus = load_corpus_reader(args.source_corpus, reader=args.reader,
    fileids=args.fileids, encoding='utf-8', sent_tokenizer=args.sent_tokenizer,
    word_tokenizer=args.word_tokenizer)

if not source_corpus:
    # BUG FIX: the original message was '%s is an unknown corpus' with no
    # arguments, so the literal '%s' was shown instead of the corpus name.
    raise ValueError('%s is an unknown corpus' % args.source_corpus)

if args.trace:
    print('loaded %s' % args.source_corpus)
############
## tagger ##
############
# Resolve args.tagger first via the nltk_data search path; if that fails,
# fall back to unpickling it from a plain filesystem path.
# TODO: from analyze_tagger_coverage.py
if args.trace:
    print('loading tagger %s' % args.tagger)

try:
    tagger = nltk.data.load(args.tagger)
except LookupError:
    # Not on the nltk_data path: treat the argument as a pickle file path.
    try:
        import cPickle as pickle  # Python 2 (faster C implementation)
    except ImportError:
        import pickle  # Python 3
    # BUG FIX: the original did pickle.load(open(path)) — that leaks the
    # file handle and opens in text mode, which breaks unpickling on
    # Python 3. Use a context manager and binary mode instead.
    with open(os.path.expanduser(args.tagger), 'rb') as tagger_file:
        tagger = pickle.load(tagger_file)
#############
## tagging ##
#############
# POS-tag every sentence of every paragraph in the source corpus, writing
# the tagged paragraphs to the target path as a chunked corpus.
with ChunkedCorpusWriter(fileids=source_corpus.fileids(), path=args.target_corpus) as writer:
    for fileid in source_corpus.fileids():
        tagged_paras = (
            (tagger.tag(sentence) for sentence in paragraph)
            for paragraph in source_corpus.paras(fileids=[fileid])
        )
        writer.write_paras(tagged_paras, fileid=fileid)