-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy paththirdeye.py
More file actions
182 lines (142 loc) · 7.08 KB
/
thirdeye.py
File metadata and controls
182 lines (142 loc) · 7.08 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
from contextlib import closing
from collections import Counter
import itertools
import sys # stderr
import json
import argparse
import os
import guess_helper
import guess_matching
import guess_phrases
from guess_matching import CandidateWord # to deserialize!
def load_global_data(global_conf):
    """Load every corpus-independent resource named in *global_conf*.

    Returns a 4-tuple:
      ((matchers, translations), train_target, leidos_unigrams,
       (adjectivizers, prefixers, suffixers, untranslatables, noun_adjective_dict))
    """
    # Lexicon: matchers for lookup plus their translations.
    (matchers, translations) = guess_helper.load_dictionary(global_conf['lexicon'])
    # Target-side training data, folded into a single word-frequency Counter.
    train_target = Counter(" ".join(guess_helper.load_file_lines(global_conf['train-target'])).split())
    # LEIDOS unigram statistics in `uniq -c` format ("<count> <word>").
    # Lines with exactly one field carry no word (empty-string unigrams) and are dropped.
    leidos_unigrams = Counter({
        line.split()[1]: int(line.split()[0])
        for line in guess_helper.load_file_lines(global_conf['leidos-unigrams'])
        if len(line.split()) != 1
    })
    # Grammar resources (affixation rules, untranslatables, pertainym mapping).
    grammar = guess_helper.load_grammar(global_conf['grammar'], global_conf['pertainyms'])
    (adjectivizers, prefixers, suffixers, untranslatables, noun_adjective_dict) = grammar
    return ((matchers, translations), train_target, leidos_unigrams,
            (adjectivizers, prefixers, suffixers, untranslatables, noun_adjective_dict))
def prepare_guessing(oov_original_list, catmorfdict):
    """Partition OOV tokens into trivially-solved and genuinely guessable ones.

    A token with no alphabetic character at all (pure digits/punctuation) is
    "translated" to itself with score 1.0 and an empty feature list; every
    other token is tallied for real guessing downstream.

    Returns (oov_guesses, guessable_oovs):
      oov_guesses    -- {token: [{'translation', 'score', 'features'}]} for
                        the non-alphabetic tokens only
      guessable_oovs -- Counter of the remaining, guessable tokens

    *catmorfdict* is accepted for interface compatibility but not used here.
    """
    oov_guesses = {}
    guessable = Counter()
    for token in oov_original_list:
        if any(ch.isalpha() for ch in token):
            guessable[token] += 1
        else:
            # Pass purely non-alphabetic tokens through unchanged.
            oov_guesses[token] = [{'translation': token, 'score': 1.0, 'features': []}]
    return (oov_guesses, guessable)
if __name__ == '__main__':
    # Command-line front end: either expose a per-word web service
    # ('mode_server') or score a whole pre-segmented set ('mode_batch').
    parser = argparse.ArgumentParser(description='Guess OOVs.')
    subparsers = parser.add_subparsers(title='action')
    mode_server = subparsers.add_parser('mode_server', help='Expose webservice for full pipeline')
    mode_server.set_defaults(which='mode_server')
    mode_batch = subparsers.add_parser('mode_batch', help='Score whole sets / batches')
    mode_batch.add_argument('setname')
    mode_batch.set_defaults(which='mode_batch')
    # NOTE(review): if no subcommand is given, args has no 'which' attribute
    # and the access below raises AttributeError — confirm intended.
    args = parser.parse_args()
    if args.which == 'mode_server':
        # Server-only heavy dependencies are imported lazily.
        import morfessor
        import bottle
        # Load static data shared by all requests.
        conf = guess_helper.load_config(None)
        ((matchers, translations), train_target, leidos_unigrams, (adjectivizers, prefixers, suffixers, untranslatables, noun_adjective_dict)) = load_global_data(conf['global-files'])
        morfmodel = morfessor.MorfessorIO().read_binary_model_file(conf['server-files']['morfmodel'])
        print("Loaded files")
        # Read data and crunch
        app = bottle.Bottle()
        @app.hook('after_request')
        def enable_cors(): # https://gist.github.com/richard-flosi/3789163
            # Allow cross-origin requests from browser front ends.
            bottle.response.headers['Access-Control-Allow-Origin'] = '*'
            bottle.response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'
            bottle.response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
        @app.route('/lookupword')
        def crunch_word():
            # Run the full guessing pipeline for one OOV passed as ?oov=...
            # and return its candidate list as JSON.
            params = bottle.request.query.decode()
            import unicodedata  # NOTE(review): appears unused — normalization seems to happen in guess_helper.uninorm; confirm before removing
            oov = guess_helper.uninorm(params['oov'])
            oov_original_list = [oov]
            # Segment the word with the Morfessor model, tagging every morph "STM".
            catmorfdict = {oov: list(zip(morfmodel.viterbi_segment(oov)[0], itertools.repeat("STM")))}
            print("Segmented words into " + str(catmorfdict))
            # Generate candidate phrases from the segmentation, then match each
            # distinct phrase part against the lexicon matchers.
            phraseparts = list(itertools.chain(*itertools.chain(*map(guess_phrases.gen_phrases, catmorfdict.values()))))
            uniq_phraseparts = guess_helper.uniq_list(phraseparts) # uniq for unhashable lists!
            all_matches = dict(itertools.starmap(guess_matching.lookup_oov, zip(uniq_phraseparts, itertools.repeat(matchers))))
            print("Matched phraseparts " + str(phraseparts))
            (oov_guesses, guessable_oovs_counter) = prepare_guessing(oov_original_list, catmorfdict)
            # If it wasn't a non-OOV (prepare_guessing left the dict empty,
            # i.e. the token is genuinely guessable), do the painful thing
            if oov_guesses == {}:
                static_data = ( all_matches,
                                translations,
                                catmorfdict,
                                guessable_oovs_counter,
                                train_target,
                                leidos_unigrams,
                                (adjectivizers, prefixers, suffixers, untranslatables, noun_adjective_dict),
                                conf)
                # Keep at most the 100 best candidates.
                oov_guesses[oov] = guess_phrases.phraseguess_actual_oov(oov, static_data = static_data)[:100]
            bottle.response.content_type = 'application/json'
            return json.dumps(oov_guesses[oov])
        bottle.run(app, host='localhost', port=8080)
    elif args.which == 'mode_batch':
        # Load static data
        conf = guess_helper.load_config(args.setname)
        ((matchers, translations), train_target, leidos_unigrams, (adjectivizers, prefixers, suffixers, untranslatables, noun_adjective_dict)) = load_global_data(conf['global-files'])
        oov_original_list = guess_helper.load_file_lines(conf['set-files']['oovfile'])
        catmorfdict = guess_helper.load_catmorfdict(oov_original_list, conf['set-files']['catmorffile'])
        # Load previously calculated matches
        print("Now loading all previous matches from", conf['global-files']['allmatches'], "...", end='', file = sys.stderr)
        with open(conf['global-files']['allmatches']) as f:
            # NOTE(review): eval() executes arbitrary code from the matches
            # file (used here to deserialize CandidateWord tuples, hence the
            # import at the top of the file). Only safe if that file is
            # trusted output of this same pipeline — consider ast.literal_eval
            # or a real serialization format.
            all_matches = dict(map(lambda t: eval(guess_helper.uninorm(t)), f.read().splitlines()))
        print("done!", file = sys.stderr)
        # Prepare guessing data
        (oov_guesses, guessable_oovs_counter) = prepare_guessing(oov_original_list, catmorfdict)
        print("{} distinct OOVs to guess.".format(len(guessable_oovs_counter)), file = sys.stderr)
        # Sort for a deterministic processing order
        raw_guessable_oovs = list(guessable_oovs_counter)
        sorted_guessable_oovs = sorted(raw_guessable_oovs)
        # Distribute static data
        static_data = ( all_matches,
                        translations,
                        catmorfdict,
                        guessable_oovs_counter,
                        train_target,
                        leidos_unigrams,
                        (adjectivizers, prefixers, suffixers, untranslatables, noun_adjective_dict),
                        conf)
        # Do the guessing!
        guess_results = [guess_phrases.phraseguess_actual_oov(o, static_data) for o in sorted_guessable_oovs]
        # Pair each OOV with its candidate list, best-scored first overall.
        all_results = sorted(list(zip(sorted_guessable_oovs, guess_results)), key = lambda r: r[1][0]['score'], reverse = True)
        def print_results(t):
            # Pretty-print one (oov, candidates) pair: best translation, its
            # total score, and the individual feature scores behind it.
            (oov, candidates) = t
            print("{:>20} -> {:<20}".format(oov, candidates[0]['translation']), end='')
            print("{:10.7f} <- ".format(candidates[0]['score']), end='')
            for s in candidates[0]['features'].values():
                print(" {:10.7f}".format(s), end='')
            print("")
        # Show the 20 best and 20 worst results as a quick sanity check.
        list(map(print_results, all_results[0:20]))
        print(" [...]")
        list(map(print_results, all_results[-20:]))
        oov_guesses.update(dict(all_results))
        # Write our results in original order into result file
        with open(conf['set-files']['1best-out'], 'w') as f:
            for oov in oov_original_list:
                print(oov_guesses[oov][0]['translation'], file = f)
        def nbest(t):
            # Truncate one result to its 10 best candidates for the n-best file.
            oov, candidates = t
            return (oov, candidates[:10])
        with open(conf['set-files']['nbest-out'], 'w') as f:
            print(json.dumps(dict(map(nbest, oov_guesses.items()))), file = f)
    else:
        # Defensive fallback: reachable only if a subparser sets an unexpected 'which'.
        print("Unknown mode", args.which)
        exit(1)