Upload preprocess.py
preprocess.py
ADDED
@@ -0,0 +1,210 @@
from zipfile import ZipFile, ZIP_DEFLATED
from shutil import rmtree
import json
import os
from tqdm import tqdm
import re
import requests
from dateutil import parser as date_parser
from string import punctuation
from copy import deepcopy


def value_in_utt(value, utt):
    """Return (True, (start, end)) if value occurs in utt; the span is character-level."""
    value = value.strip(punctuation).lower()
    # '-' is escaped inside the character classes so it is a literal boundary
    # character rather than an accidental range
    p = r'(^|[\s,\.:\?!\-])(?P<v>{})([\s,\.:\?!\-\']|$)'.format(re.escape(value))
    p = re.compile(p, re.I)
    m = re.search(p, utt)
    if m:
        # very few values appear more than once; take the first span
        return True, m.span('v')
    else:
        try:
            # handle date representations, e.g. '3 pm' vs '3pm'
            date_parser.parse(value)
            if (value.endswith('pm') or value.endswith('am')) and ''.join(value.split(' ')) in ''.join(utt.split(' ')):
                return True, None
        except (ValueError, OverflowError):
            if value in utt:
                # value appears, but may be inflected (plural, -ing, -ly, etc.)
                return True, None
    return False, None


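# Worked examples for value_in_utt (illustrative, not taken from the dataset):
#   value_in_utt('dentist', 'I need a dentist appointment') -> (True, (9, 16))
#   value_in_utt('3 pm', 'remind me at 3pm')                -> (True, None)
# A None span means the value was only matched approximately, so no character
# offsets are recorded for it.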
def preprocess():
    data_file = "kvret_dataset_public.zip"
    if not os.path.exists(data_file):
        response = requests.get("http://nlp.stanford.edu/projects/kvret/kvret_dataset_public.zip")
        with open(data_file, "wb") as f:
            f.write(response.content)

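    # The raw KVRET zip is kept on disk and reused on later runs; the
    # individual JSON files are read straight from the archive below,
    # without extracting it.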
    archive = ZipFile(data_file)

    new_data_dir = 'data'

    os.makedirs(new_data_dir, exist_ok=True)

    dataset = 'kvret'
    splits = ['train', 'validation', 'test']
    dialogues_by_split = {split: [] for split in splits}

    ontology = {'domains': {},
                'intents': {
                    'inform': {'description': ''},
                    'request': {'description': ''}
                },
                'state': {},
                'dialogue_acts': {
                    "categorical": {},
                    "non-categorical": {},
                    "binary": {}
                }}

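    # The ontology skeleton collects domains/slots, intents, the state
    # structure, and the dialogue acts observed in the data; the empty
    # dicts are filled in as the files are processed below.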
    domain2slot = {
        'schedule': ['event', 'time', 'date', 'party', 'room', 'agenda'],
        'weather': ['location', 'weekly_time', 'temperature', 'weather_attribute'],
        'navigate': ['poi', 'traffic_info', 'poi_type', 'address', 'distance']
    }
    slot2domain = {slot: domain for domain in domain2slot for slot in domain2slot[domain]}

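    # Inverted index: each slot name maps back to its domain,
    # e.g. slot2domain['poi'] == 'navigate', slot2domain['date'] == 'schedule'.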
    db = []
    with archive.open('kvret_entities.json') as f:
        entities = json.load(f)
        for slot, values in entities.items():
            domain = slot2domain[slot]
            ontology['domains'].setdefault(domain, {'description': '', 'slots': {}})
            if slot == 'poi':
                for s in ['poi', 'address', 'poi_type']:
                    ontology['domains'][domain]['slots'][s] = {'description': '', 'is_categorical': False, 'possible_values': []}
                for item in values:
                    poi, address, poi_type = item['poi'], item['address'], item['type']
                    db.append({'poi': poi, 'address': address, 'poi_type': poi_type})
                    for s in ['poi', 'address', 'poi_type']:
                        ontology['domains'][domain]['slots'][s]['possible_values'].append(db[-1][s])
                continue
            elif slot == 'weekly_time':
                slot = 'date'
            elif slot == 'temperature':
                values = [f"{x}F" for x in values]
            elif slot == 'distance':
                values = [f"{x} miles" for x in values]

            ontology['domains'][domain]['slots'][slot] = {'description': '', 'is_categorical': False, 'possible_values': values}

    for domain in ontology['domains']:
        for slot in ontology['domains'][domain]['slots']:
            ontology['domains'][domain]['slots'][slot]['possible_values'] = sorted(set(ontology['domains'][domain]['slots'][slot]['possible_values']))

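    # Deduplicate and sort possible_values so the generated ontology is
    # deterministic across runs.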
    for data_split in splits:
        filename = data_split if data_split != 'validation' else 'dev'
        with archive.open(f'kvret_{filename}_public.json') as f:
            data = json.load(f)
            for item in tqdm(data):
                if len(item['dialogue']) == 0:
                    continue
                scenario = item['scenario']
                domain = scenario['task']['intent']

                slots = scenario['kb']['column_names']
                db_results = {domain: []}
                if scenario['kb']['items']:
                    for entry in scenario['kb']['items']:
                        db_results[domain].append({s: entry[s] for s in slots})

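                # Each dialogue ships its own KB snapshot; it is attached to
                # every system turn as 'db_results' further below.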
                dialogue_id = f'{dataset}-{data_split}-{len(dialogues_by_split[data_split])}'
                dialogue = {
                    'dataset': dataset,
                    'data_split': data_split,
                    'dialogue_id': dialogue_id,
                    'original_id': f'{data_split}-{len(dialogues_by_split[data_split])}',
                    'domains': [domain],
                    'turns': []
                }
                init_state = {domain: {}}

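                # init_state accumulates slot values over the whole dialogue;
                # each user turn stores a deepcopy snapshot as its belief state.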
                for turn in item['dialogue']:
                    speaker = 'user' if turn['turn'] == 'driver' else 'system'
                    utt = turn['data']['utterance'].strip()
                    if len(dialogue['turns']) > 0 and speaker == dialogue['turns'][-1]['speaker']:
                        # two consecutive turns from the same speaker:
                        # skip exact repeats, otherwise keep only the latest utterance
                        if utt == dialogue['turns'][-1]['utterance']:
                            continue
                        else:
                            dialogue['turns'].pop(-1)

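                    # Every turn gets the same record layout; 'state' (user
                    # turns) and 'db_results' (system turns) are added below.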
                    dialogue['turns'].append({
                        'speaker': speaker,
                        'utterance': utt,
                        'utt_idx': len(dialogue['turns']),
                        'dialogue_acts': {
                            'binary': [],
                            'categorical': [],
                            'non-categorical': [],
                        },
                    })

                    if speaker == 'user':
                        dialogue['turns'][-1]['state'] = deepcopy(init_state)
                    else:
                        user_da = {'binary': [], 'categorical': [], 'non-categorical': []}
                        user_utt = dialogue['turns'][-2]['utterance']

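                        # KVRET stores slot annotations on the system turn, but
                        # they describe the preceding user utterance, so acts and
                        # state are written back onto turns[-2] (the user turn).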
                        for slot, value in turn['data']['slots'].items():
                            value = value.strip()
                            is_appear, span = value_in_utt(value, user_utt)
                            if is_appear:
                                if span:
                                    start, end = span
                                    user_da['non-categorical'].append({
                                        'intent': 'inform', 'domain': domain, 'slot': slot, 'value': user_utt[start:end],
                                        'start': start, 'end': end
                                    })
                                else:
                                    user_da['non-categorical'].append({
                                        'intent': 'inform', 'domain': domain, 'slot': slot, 'value': value,
                                    })
                            init_state[domain][slot] = value
                            ontology['state'].setdefault(domain, {})
                            ontology['state'][domain].setdefault(slot, '')
                        dialogue['turns'][-2]['state'] = deepcopy(init_state)

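                        # 'requested' flags become request acts only for slots
                        # that were not also informed in the same annotation.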
                        for slot, present in turn['data']['requested'].items():
                            if slot not in turn['data']['slots'] and present:
                                user_da['binary'].append({'intent': 'request', 'domain': domain, 'slot': slot})

                        dialogue['turns'][-2]['dialogue_acts'] = user_da
                        dialogue['turns'][-1]['db_results'] = db_results

                        for da_type in user_da:
                            das = user_da[da_type]
                            for da in das:
                                ontology["dialogue_acts"][da_type].setdefault((da['intent'], da['domain'], da['slot']), {})
                                ontology["dialogue_acts"][da_type][(da['intent'], da['domain'], da['slot'])]['user'] = True

                        assert all([s in ontology['domains'][domain]['slots'] for s in turn['data']['requested']]), \
                            f"unknown requested slot: {turn['data']['requested']} vs {list(ontology['domains'][domain]['slots'])}"
                        assert all([s in ontology['domains'][domain]['slots'] for s in turn['data']['slots']]), \
                            f"unknown informed slot: {turn['data']['slots']} vs {list(ontology['domains'][domain]['slots'])}"

                dialogues_by_split[data_split].append(dialogue)

    for da_type in ontology['dialogue_acts']:
        ontology["dialogue_acts"][da_type] = sorted([
            str({'user': speakers.get('user', False), 'system': speakers.get('system', False),
                 'intent': da[0], 'domain': da[1], 'slot': da[2]})
            for da, speakers in ontology["dialogue_acts"][da_type].items()
        ])
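    # Tuple keys are stringified because JSON object keys must be strings;
    # sorting keeps the act inventory stable across runs.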
    dialogues = dialogues_by_split['train'] + dialogues_by_split['validation'] + dialogues_by_split['test']
    with open('dummy_data.json', 'w', encoding='utf-8') as f:
        json.dump(dialogues[:10], f, indent=2, ensure_ascii=False)
    with open(f'{new_data_dir}/ontology.json', 'w', encoding='utf-8') as f:
        json.dump(ontology, f, indent=2, ensure_ascii=False)
    with open(f'{new_data_dir}/dialogues.json', 'w', encoding='utf-8') as f:
        json.dump(dialogues, f, indent=2, ensure_ascii=False)
    with open(f'{new_data_dir}/db.json', 'w', encoding='utf-8') as f:
        json.dump(db, f, indent=2, ensure_ascii=False)
    with ZipFile('data.zip', 'w', ZIP_DEFLATED) as zf:
        for filename in os.listdir(new_data_dir):
            zf.write(f'{new_data_dir}/{filename}')
    rmtree(new_data_dir)
    return dialogues, ontology


if __name__ == '__main__':
    preprocess()
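
# Usage sketch: running this module end-to-end downloads the KVRET zip on the
# first run (network access required) and produces data.zip (containing
# data/ontology.json, data/dialogues.json, data/db.json) plus dummy_data.json
# holding the first 10 dialogues:
#
#     python preprocess.py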