-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathslots.py
305 lines (246 loc) · 12.2 KB
/
slots.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
import csv
import os
import random
import sys
from collections import defaultdict
from itertools import chain
from typing import Dict, List, Any, Union
from fuzzywuzzy import fuzz
from svm_classifier_utlilities import SentenceClassifier, TextClassifier
from tomita.tomita import TomitaPool
class DictionarySlot:
    """Base slot: fills its value by fuzzy-matching user text against a
    dictionary of known phrases.

    Two dictionaries map synonym phrases to a normal value:
      * ``generative_dict``  — synonyms the bot may also generate itself;
      * ``nongenerative_dict`` — recognize-only synonyms.
    """

    def __init__(self, slot_id: str, ask_sentence: str, generative_dict: Dict[str, str],
                 nongenerative_dict: Dict[str, str], values_order: List[str], prev_created_slots: List, *args):
        self.id = slot_id
        # Alternative question wordings are separated by '~' in the TSV.
        self.ask_sentences = ask_sentence.split('~')
        self.gen_dict = generative_dict
        self.nongen_dict = nongenerative_dict

        # Bucket every known phrase by its word count so _infer can slide a
        # window of exactly that many tokens over the input.
        self.ngrams = defaultdict(list)
        for phrase in chain(nongenerative_dict, generative_dict):
            self.ngrams[len(phrase.split())].append(phrase)

        # First declared normal value acts as the "positive" one (used by
        # ClassifierSlot's true/false filters).  Default to None instead of
        # leaving the attribute unset, so filter lambdas never raise
        # AttributeError for slots defined without values.
        self.true = values_order[0] if values_order else None

        self.threshold = 90  # minimal fuzz.ratio score to accept a match
        self.input_type = {'text'}
        # Named predicates used by dialogue logic: filters[name](value, arg).
        self.filters = {
            'any': lambda x, _: True,
            'eq': lambda x, y: x == y,
            'not_eq': lambda x, y: x != y,
            'in': lambda x, y: x in y.split(','),
            'not_in': lambda x, y: x not in y.split(',')
        }

    def infer_from_compositional_request(self, text, input_type='text'):
        """Try to extract this slot from a multi-slot request; None on mismatch."""
        if input_type not in self.input_type:
            return None
        return self._infer_from_compositional_request(text)

    def _infer_from_compositional_request(self, text):
        return self._infer(text)

    def infer_from_single_slot(self, text, input_type='text'):
        """Try to extract this slot when the whole message is its answer."""
        if input_type not in self.input_type:
            return None
        return self._infer_from_single_slot(text)

    def infer_many(self, text, input_type='text'):
        """Return the set of all slot values found in any contiguous span of `text`."""
        if input_type not in self.input_type:
            return None
        n = len(text)
        res = set()
        # Run inference on every contiguous subsequence text[i:k].
        for i in range(n):
            for k in range(i + 1, n + 1):
                c = self._infer(text[i:k])
                if c:
                    res.add(c)
        return res

    def _infer_from_single_slot(self, text):
        return self._infer(text)

    def _normal_value(self, text: str) -> str:
        """Map a matched synonym phrase to its normal value (either dict)."""
        return self.gen_dict.get(text, self.nongen_dict.get(text, 'Лажа какая-то'))

    def _infer(self, text: List[Dict[str, Any]]) -> Union[str, None]:
        """Fuzzy-match token windows of `text` against known phrases.

        `text` is a list of token dicts carrying a 'normal' key.  Returns the
        normal value of the best-scoring phrase if it reaches ``self.threshold``,
        otherwise None.
        """
        n = len(text)
        best_score = 0
        best_candidate = None
        for window, candidates in self.ngrams.items():
            for w in range(0, n - window + 1):
                query = ' '.join(x['normal'] for x in text[w:w + window])
                if query:
                    for c in candidates:
                        score = fuzz.ratio(c, query)
                        if score > best_score:
                            best_score = score
                            best_candidate = c
        if best_score >= self.threshold:
            return self._normal_value(best_candidate)
        return None

    def __repr__(self):
        return '{}(name={}, len(dict)={})'.format(self.__class__.__name__, self.id, len(self.gen_dict))

    def filter(self, value: str) -> bool:
        # Bug fix: `raise NotImplemented()` raised TypeError (NotImplemented
        # is not callable); the intended exception is NotImplementedError.
        raise NotImplementedError()

    def ask(self) -> str:
        """Return one of the configured question formulations at random."""
        return random.choice(self.ask_sentences)
class AskOnlyDictionarySlot(DictionarySlot):
    """Dictionary slot that is only filled via a direct question.

    It behaves exactly like its base class except that it never extracts a
    value from a compositional (multi-slot) request.
    """

    def __init__(self, slot_id: str, ask_sentence: str, generative_dict: Dict[str, str],
                 nongenerative_dict: Dict[str, str], values_order: List[str], prev_created_slots, *args):
        super().__init__(slot_id, ask_sentence, generative_dict, nongenerative_dict,
                         values_order, prev_created_slots, *args)

    def _infer_from_compositional_request(self, text: List[Dict[str, Any]]):
        # Parsing of compositional requests is deliberately disabled.
        return None
class CurrencySlot(DictionarySlot):
    """Dictionary slot for currency codes.

    Adds filters that test whether the extracted currency is one the system
    supports.
    """

    def __init__(self, slot_id: str, ask_sentence: str, generative_dict: Dict[str, str],
                 nongenerative_dict: Dict[str, str], values_order: List[str], prev_created_slots, *args):
        super().__init__(slot_id, ask_sentence, generative_dict, nongenerative_dict,
                         values_order, prev_created_slots, *args)
        self.supported_currencies = [
            'rub', 'eur', 'usd', 'aud', 'byn', 'cad', 'chf', 'cny', 'czk',
            'dkk', 'gbp', 'hkd', 'hrk', 'huf', 'jpy', 'kzt', 'nok', 'pln',
            'sgd', 'sek', 'uah', 'try',
        ]
        self.filters['supported_currency'] = lambda x, _: x in self.supported_currencies
        self.filters['not_supported_currency'] = lambda x, _: x not in self.supported_currencies
class ClassifierSlot(DictionarySlot):
    """Slot whose value comes from a pre-trained sentence classifier rather
    than dictionary matching (for compositional requests)."""

    def __init__(self, slot_id: str, ask_sentence: str, generative_dict: Dict[str, str],
                 nongenerative_dict: Dict[str, str], values_order: List[str], prev_created_slots, *args):
        super().__init__(slot_id, ask_sentence, generative_dict, nongenerative_dict,
                         values_order, prev_created_slots, *args)
        # The serialized model file is named after the slot id.
        self.classifier = SentenceClassifier(
            None, model_path=os.path.join('models_nlu', self.id + '.model'))  # type: TextClassifier
        # true/false filters compare against the first declared value.
        self.filters['true'] = lambda x, _: x == self.true
        self.filters['false'] = lambda x, _: x != self.true

    def _infer_from_compositional_request(self, text: List[Dict[str, Any]]):
        return self.classifier.predict_single(text)
class CompositionalSlot(DictionarySlot):
    """Slot that delegates inference to previously created child slots.

    The child slot names come in via ``*args`` and are resolved against
    ``prev_created_slots``.  On a match the result is a dict
    ``{child_id: value, own_id: child_id}`` so the caller knows which child
    fired.
    """

    def __init__(self, slot_id: str, ask_sentence: str, generative_dict: Dict[str, str],
                 nongenerative_dict: Dict[str, str], values_order: List[str], prev_created_slots, *args):
        super().__init__(slot_id, ask_sentence, generative_dict, nongenerative_dict,
                         values_order, prev_created_slots, *args)
        slots_by_id = {slot.id: slot for slot in prev_created_slots}
        self.children = [slots_by_id[name] for name in args]
        # Accept every input type that at least one child accepts.
        self.input_type = set()
        for child in self.children:
            self.input_type |= child.input_type

    def infer_from_compositional_request(self, text, input_type='text'):
        for child in self.children:
            value = child.infer_from_compositional_request(text, input_type)
            if value is not None:
                return {child.id: value, self.id: child.id}
        return None

    def infer_from_single_slot(self, text, input_type='text'):
        for child in self.children:
            value = child.infer_from_single_slot(text, input_type)
            if value is not None:
                return {child.id: value, self.id: child.id}
        return None
class TomitaSlot(DictionarySlot):
    """Slot extracted by the Yandex Tomita fact-extraction parser.

    Expects exactly two extra args from the TSV definition: the path to the
    Tomita grammar/config file and the name of the fact to extract.
    """

    def __init__(self, slot_id: str, ask_sentence: str, generative_dict: Dict[str, str],
                 nongenerative_dict: Dict[str, str], values_order: List[str], prev_created_slots, *args):
        super().__init__(slot_id, ask_sentence, generative_dict, nongenerative_dict, values_order, prev_created_slots,
                         *args)
        assert len(args) == 2, 'Slot {} has exactly 2 arguments'.format(TomitaSlot.__name__)
        config_proto, target_fact = args  # grammar config path, fact name to pull out
        self.target_fact = target_fact
        config_real_path = os.path.realpath(config_proto)
        # Tomita is launched with the config's directory as cwd — presumably
        # so relative paths inside the config resolve; confirm against TomitaPool.
        wd = os.path.dirname(config_real_path)
        assert 'TOMITA_PATH' in os.environ, 'Specify path to Tomita binary in $TOMITA_PATH'
        tomita_path = os.environ['TOMITA_PATH']
        # NOTE(review): this handle is intentionally left open — TomitaPool
        # keeps logging to it for the slot's lifetime; it is never closed here.
        logfile = open(os.path.join('.', 'logs', '{}.log'.format(slot_id)), 'wb')
        self.tomita = TomitaPool(tomita_path, config_real_path, cwd=wd, logfile=logfile)

    def _infer(self, text: List[Dict[str, Any]]):
        # Rebuild the original (pre-normalization) message from the tokens.
        joined_text = ' '.join(w['_orig'] for w in text)
        # Drop punctuation that follows a space, then turn any remaining dots
        # into spaces.  NOTE(review): rationale not documented — presumably to
        # keep Tomita's reported character offsets usable; confirm.
        for p in '.,!?:;':
            joined_text = joined_text.replace(' ' + p, '')
        joined_text = joined_text.replace('.', ' ')
        res = self.tomita.get_json(joined_text) or None
        if res:
            target_vals = res['facts'][self.target_fact]
            # TODO: ignore all other variants?! Better ideas?
            if isinstance(target_vals, list):
                target_vals = target_vals[0]
            # '@pos'/'@len' are character offsets into the text sent to Tomita.
            pos = int(target_vals['@pos'])
            ln = int(target_vals['@len'])
            return joined_text[pos:pos + ln]
class GeoSlot(DictionarySlot):
    """Slot filled from a geo-location payload instead of text input."""

    def __init__(self, slot_id: str, ask_sentence: str, generative_dict: Dict[str, str],
                 nongenerative_dict: Dict[str, str], values_order: List[str], prev_created_slots, *args):
        super().__init__(slot_id, ask_sentence, generative_dict, nongenerative_dict,
                         values_order, prev_created_slots, *args)
        self.input_type = {'geo'}

    def _infer(self, location: Dict[str, float]):
        # The incoming location payload is already the slot value.
        return location
def read_slots_from_tsv(pipeline, filename=None):
    """Read slot definitions from a TSV file and instantiate slot objects.

    The file is a sequence of sections separated by blank rows.  Each section
    starts with a header row ``<name>-><Class>[-><arg>...]<TAB><question>``
    followed by value rows ``normal_name[<TAB>generative_syns[<TAB>nongenerative_syns]]``
    where synonym cells are comma-separated phrases.

    :param pipeline: normalization pipeline; ``pipeline.feed(text)`` must
        yield token dicts carrying a ``'normal'`` key
    :param filename: path to the TSV file; defaults to 'slots_definitions.tsv'
    :return: list of created slot instances, in file order
    :raises ValueError: if a value row has more than three columns
    """
    D = '\t'
    if filename is None:
        filename = 'slots_definitions.tsv'

    def pipe(text):
        # Normalize a phrase with the same pipeline used at inference time.
        return ' '.join([w['normal'] for w in pipeline.feed(text)])

    def clean_syns(cell):
        # '"a", b' -> ['a', 'b']; empty cell -> [].  Strips quote marks and
        # the space after commas before splitting.
        if not cell:
            return []
        return cell.replace(', ', ',').replace('“', '').replace('”', ''). \
            replace('"', '').split(',')

    result_slots = []

    with open(filename) as f:
        csv_rows = csv.reader(f, delimiter=D, quotechar='"')

        slot_name = None
        slot_class = None
        args = []
        info_question = None
        normal_names_order = []
        generative_slot_values = {}
        nongenerative_slot_values = {}

        def flush_slot():
            # Instantiate the class named in the header; it must be defined
            # in this module.  Previously created slots are passed along so
            # CompositionalSlot can resolve its children.
            SlotClass = getattr(sys.modules[__name__], slot_class)
            slot = SlotClass(slot_name, info_question, generative_slot_values, nongenerative_slot_values,
                             normal_names_order, result_slots, *args)
            result_slots.append(slot)

        for row in csv_rows:
            if slot_name is None:
                # Header row of a new section.
                slot_name, slot_class, *args = row[0].split()[0].split('->')
                info_question = row[1].strip()
                normal_names_order = []
            elif ''.join(row):
                # Value row: normal name plus optional synonym cells.
                generative_syns = ''
                nongenerative_syns = ''
                if len(row) == 1:
                    normal_name = row[0]
                elif len(row) == 2:
                    normal_name, generative_syns = row
                elif len(row) == 3:
                    normal_name, generative_syns, nongenerative_syns = row
                else:
                    raise ValueError('Slot value row has too many columns: {!r}'.format(row))

                normal_name = pipe(normal_name)
                normal_names_order.append(normal_name)

                generative_syns = clean_syns(generative_syns)
                nongenerative_syns = clean_syns(nongenerative_syns)

                if nongenerative_syns and generative_syns:
                    # A synonym must not be declared in both columns.
                    assert not (set(nongenerative_syns).intersection(set(generative_syns))), [nongenerative_syns,
                                                                                              generative_syns]

                for s in nongenerative_syns:
                    nongenerative_slot_values[pipe(s)] = normal_name
                generative_slot_values[normal_name] = normal_name
                for s in generative_syns:
                    generative_slot_values[pipe(s)] = normal_name
            else:
                # Blank row ends the current section: create the slot.
                flush_slot()
                slot_name = None
                generative_slot_values = {}
                nongenerative_slot_values = {}

        if slot_name:
            # File ended without a trailing blank row: flush the last section.
            flush_slot()

    return result_slots
def read_slots_serialized(folder, pipe):
    """
    Read slots from tsv and load saved svm models.

    :param folder: path to folder with models
    :param pipe: text-normalization pipeline forwarded to read_slots_from_tsv
    :return: array of slots
    :raises FileNotFoundError: if a ClassifierSlot's model file is missing
    """
    slots_array = read_slots_from_tsv(pipeline=pipe)
    for s in slots_array:
        # Only classifier slots carry a serialized model to load; compute the
        # path inside the branch so non-classifier slots skip it entirely.
        if isinstance(s, ClassifierSlot):
            name = os.path.join(folder, s.id + '.model')
            if not os.path.exists(name):
                # FileNotFoundError (an Exception subclass) replaces the
                # previous bare Exception, so broad handlers still work.
                raise FileNotFoundError("{} does not exist".format(name))
            s.classifier.load_model(name)
    return slots_array