from const import (
    SUMMARY,
    EMOTIONS,
    EMOTION,
    UTTERANCE,
    ASPECTS,
    TARGET,
    VALUE,
    OPINION,
    SENTIMENT,
    CATEGORY,
    CHARACTERS,
    DIALOG,
    START,
    END,
    BELIEF_STATE,
    DOMAIN,
    INFORMED_SLOT_VALUE_TABLE,
    SLOT,
    VALUES,
    RELATION,
    SQL,
    SLOT_VALUE_TABLE,
    SLOTS_TO_FILL,
    ROLE_RELATIONS,
    REWRITTEN,
    ROLES_TO_SELECT,
    ACTIVE_INTENTS,
    TRAIN_SPLIT,
    OPTION_LABEL,
    CANDIDATES,
)
from typing import Dict
import re
import random
import copy
import json


def extract_summary(dial: Dict, **kwargs):
    """
    `dial` is the full dialog.
    """
    return dial[SUMMARY]


def extract_turn_emotion(turn: Dict, sep: str, **kwargs):
    if EMOTIONS not in turn:
        return None
    return sep.join(map(lambda x: x[EMOTION], turn[EMOTIONS]))


def extract_turn_emotion_wrapper(sep: str):
    def extract_turn_emotion_func(turn: Dict, **kwargs):
        return extract_turn_emotion(turn, sep)

    return extract_turn_emotion_func
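
# Hedged usage sketch (illustrative only; the sample turn below is made up,
# and the upper-case names are the key constants imported from `const`):
#
#   turn = {EMOTIONS: [{EMOTION: "happy"}, {EMOTION: "surprised"}]}
#   extract_turn_emotion_wrapper(", ")(turn)   # -> "happy, surprised"
#   extract_turn_emotion_wrapper(", ")({})     # -> None (no EMOTIONS key)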


def extract_turn_utterance(turn: Dict, **kwargs):
    return turn[UTTERANCE]


def extract_aspects(turn: Dict, ext_aspect_sep: str, int_aspect_sep: str):
    if not turn[ASPECTS]:
        return "None"

    aspects = turn[ASPECTS]

    tgt_seq = []
    for aspect in aspects:
        aspect_seq = []
        if TARGET in aspect:
            aspect_seq.append(aspect[TARGET][VALUE])
        if CATEGORY in aspect:
            aspect_seq.append(aspect[CATEGORY])

        if OPINION in aspect:
            aspect_seq.append(aspect[OPINION][VALUE])

        if SENTIMENT in aspect:
            aspect_seq.append(aspect[SENTIMENT])

        tgt_seq.append(int_aspect_sep.join(aspect_seq))

    return ext_aspect_sep.join(tgt_seq)


def extract_aspects_wrapper(ext_aspect_sep: str, int_aspect_sep: str):
    def extract_aspects_func(turn: Dict, **kwargs):
        return extract_aspects(turn, ext_aspect_sep, int_aspect_sep)

    return extract_aspects_func
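
# Hedged usage sketch (illustrative only; the sample aspect and separators
# below are made up):
#
#   turn = {ASPECTS: [{TARGET: {VALUE: "battery"}, CATEGORY: "hardware",
#                      OPINION: {VALUE: "lasts long"}, SENTIMENT: "positive"}]}
#   extract_aspects_wrapper(" | ", ", ")(turn)
#   # -> "battery, hardware, lasts long, positive"
#   extract_aspects_wrapper(" | ", ", ")({ASPECTS: []})   # -> "None"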


def rebuild_utterance_with_characters(turn: Dict, split):
    """For training, inline each character span into the utterance as
    `[surface | value]`; otherwise serialize the spans as `value, start, end`
    tuples joined by " | "."""
    if split == "train":
        utterance = turn[UTTERANCE]
        parts = []
        pre = 0

        for character in turn[CHARACTERS]:
            parts.append(utterance[pre : character[START]])
            parts.append(
                f"[{utterance[character[START]: character[END]]} | {character[VALUE]}]"
            )
            pre = character[END]

        parts.append(utterance[pre:])
        return "".join(parts)

    else:
        tuples = []
        for character in turn[CHARACTERS]:
            tuples.append(f"{character[VALUE]}, {character[START]}, {character[END]}")

        if not tuples:
            return "None"
        return " | ".join(tuples)
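
# Hedged usage sketch (illustrative only; offsets refer to the made-up
# utterance below):
#
#   turn = {UTTERANCE: "Alice met Bob",
#           CHARACTERS: [{START: 0, END: 5, VALUE: "person"},
#                        {START: 10, END: 13, VALUE: "person"}]}
#   rebuild_utterance_with_characters(turn, "train")
#   # -> "[Alice | person] met [Bob | person]"
#   rebuild_utterance_with_characters(turn, "test")
#   # -> "person, 0, 5 | person, 10, 13"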


def extract_characters(example):
    for turn_id, turn in enumerate(example[DIALOG]):
        if CHARACTERS not in turn:
            continue

        for character in turn[CHARACTERS]:
            yield turn_id, character[VALUE], (character[END],)


def extract_belief_state(
    turn,
    value_sep,
    domain_sep,
    slot_sep,
    domain_prompt_op,
    ontology=None,
    do_train=True,
):
    """Serialize the turn's belief state, grouping `slot relation value(s)`
    triples by domain; slot order is shuffled when do_train is True."""
    domain_bs = dict()
    bs = turn[BELIEF_STATE]

    # spare_bs = {domain: {slot for slot in ontology[domain]} for domain in ontology}

    for state in bs:
        domain = state[DOMAIN]
        if domain not in domain_bs:
            domain_bs[domain] = dict()

        if INFORMED_SLOT_VALUE_TABLE not in state:
            continue

        for svp in state[INFORMED_SLOT_VALUE_TABLE]:
            slot = svp[SLOT]
            values = svp[VALUES]
            relation = svp[RELATION]

            if slot not in domain_bs[domain]:
                domain_bs[domain][slot] = {"relation": relation, "values": []}
            domain_bs[domain][slot]["values"] += list(map(lambda x: x[VALUE], values))

            # spare_bs[domain].remove(slot)

    domain_bs_list = []
    for domain in domain_bs:
        svp_list = []
        for slot in domain_bs[domain]:
            val_str = value_sep.join(domain_bs[domain][slot]["values"])
            svp_list.append(f"{slot} {domain_bs[domain][slot]['relation']} {val_str}")

        # control whether to add spare slots
        # for slot in sorted(spare_bs[domain]):
        #     svp_list.append(f"{slot} = None")
        if not svp_list:
            continue
        if do_train:
            # shuffle for training
            random.shuffle(svp_list)

        # append a trailing slot separator to ease end-of-sequence prediction for T5
        svt_str = slot_sep.join(svp_list) + slot_sep

        domain_bs_list.append(f"{domain}{domain_prompt_op}{svt_str.strip()}")

    if not domain_bs_list:
        return "None"

    return domain_sep.join(domain_bs_list)


def extract_belief_state_wrapper(value_sep, domain_sep, slot_sep, domain_prompt_op):
    def extract_belief_state_func(turn, ontology, do_train=True, **kwargs):
        return extract_belief_state(
            turn,
            value_sep,
            domain_sep,
            slot_sep,
            domain_prompt_op,
            ontology,
            do_train=do_train,
        )

    return extract_belief_state_func
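
# Hedged usage sketch (illustrative only; the separators and the sample state
# below are made up):
#
#   turn = {BELIEF_STATE: [{DOMAIN: "hotel", INFORMED_SLOT_VALUE_TABLE: [
#       {SLOT: "area", RELATION: "=", VALUES: [{VALUE: "centre"}]},
#       {SLOT: "stars", RELATION: "=", VALUES: [{VALUE: "4"}]}]}]}
#   extract_belief_state_wrapper(", ", " || ", " ; ", ": ")(
#       turn, ontology=None, do_train=False
#   )
#   # -> "hotel: area = centre ; stars = 4 ;"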


def normalize(query: str) -> str:
    """Normalize an SQL query: lower-case unquoted tokens, collapse repeated
    whitespace, and tighten spacing around commas, brackets, aggregate calls,
    and two-character comparison operators."""

    def comma_fix(s):
        # Remove spaces in front of commas
        return s.replace(" , ", ", ")

    def white_space_fix(s):
        # Remove double and triple spaces
        return " ".join(s.split())

    def lower(s):
        # Convert everything except text between (single or double) quotation marks to lower case
        return re.sub(
            r"\b(?<!['\"])(\w+)(?!['\"])\b", lambda match: match.group(1).lower(), s
        )

    def space_fix(sql: str):
        def agg_fix(sql: str):
            return re.sub(
                r"(count|max|min|sum|avg)\s\(",
                lambda match: match.group(0).replace(" ", ""),
                sql,
            )

        def brackets_fix(sql: str):
            sql = re.sub(r"\(\s", lambda match: match.group(0)[:-1], sql)
            sql = re.sub(r"\s\)", lambda match: match.group(0)[1:], sql)

            return sql

        def double_chars_op_fix(sql: str):
            return re.sub(
                r"((>|<|!)\s=)",
                lambda match: match.group(0).replace(" ", ""),
                sql,
            )

        return double_chars_op_fix(brackets_fix(agg_fix(sql)))

    return space_fix(comma_fix(white_space_fix(lower(query))))
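
# Hedged usage sketch (illustrative only):
#
#   normalize("SELECT COUNT ( * ) FROM  singer WHERE age > = 20")
#   # -> "select count(*) from singer where age >= 20"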


def extract_sql(turn, split):
    if SQL not in turn:
        return None
    _normalize = normalize if split == "train" else (lambda x: x)
    return _normalize(turn[SQL])


def extract_slots_without_intents(turn, value_sep, slot_sep):
    if SLOTS_TO_FILL not in turn or not turn[SLOTS_TO_FILL][SLOT_VALUE_TABLE]:
        return "None"
    slots = []
    for svp in turn[SLOTS_TO_FILL][SLOT_VALUE_TABLE]:
        slots.append(
            svp[SLOT]
            + " "
            + svp[RELATION]
            + " "
            + value_sep.join(map(lambda x: x[VALUE], svp[VALUES]))
        )

    return (slot_sep.join(slots) + slot_sep).strip()


def extract_slots_without_intents_wrapper(value_sep, slot_sep):
    def extract_slots_without_intents_func(turn, **kwargs):
        return extract_slots_without_intents(turn, value_sep, slot_sep)

    return extract_slots_without_intents_func
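
# Hedged usage sketch (illustrative only; the sample slot table and separators
# below are made up):
#
#   turn = {SLOTS_TO_FILL: {SLOT_VALUE_TABLE: [
#       {SLOT: "city", RELATION: "=", VALUES: [{VALUE: "Boston"}]}]}}
#   extract_slots_without_intents_wrapper(", ", " ; ")(turn)
#   # -> "city = Boston ;"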


def extract_role_relation_without_turn(dialog, relation_sep):
    return relation_sep.join(map(lambda x: x[RELATION], dialog[ROLE_RELATIONS]))


def extract_role_relation_without_turn_wrapper(relation_sep):
    def extract_role_relation_without_turn_func(dialog, **kwargs):
        return extract_role_relation_without_turn(dialog, relation_sep)

    return extract_role_relation_without_turn_func


def extract_rewritten(turn, **kwargs):
    if REWRITTEN not in turn:
        return None
    return turn[REWRITTEN]


def extract_options(turn, knowledge, split=None):
    """For the train split, return the knowledge entry of the gold role;
    otherwise return a JSON object pairing the gold label with all candidates."""
    if ROLES_TO_SELECT not in turn:
        return None
    if split == TRAIN_SPLIT:
        return knowledge[turn[ROLES_TO_SELECT][0]]
    else:
        return json.dumps(
            {OPTION_LABEL: turn[ROLES_TO_SELECT][0], CANDIDATES: knowledge}
        )


# def extract_roles_wrapper(role_sep):
#     def extract_roles_func(turn, knowledge, split=None):
#         return extract_options(turn, know)

#     return extract_roles_func


def extract_intents(turn, intent_sep):
    if not turn[ACTIVE_INTENTS]:
        return "None"
    return intent_sep.join(
        map(lambda intent: intent.replace("_", " "), turn[ACTIVE_INTENTS])
    )


def extract_intents_wrapper(intent_sep):
    def extract_intents_func(turn, **kwargs):
        return extract_intents(turn, intent_sep)

    return extract_intents_func
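
# Hedged usage sketch (illustrative only; intent names are made up):
#
#   turn = {ACTIVE_INTENTS: ["find_restaurant", "book_hotel"]}
#   extract_intents_wrapper(", ")(turn)   # -> "find restaurant, book hotel"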