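"""Preprocess a document-grounded dialog corpus into one JSONL file per split.

The expected layout (Conversations/{train,valid,test} plus WikiData, with a
"wikiDocumentIdx" field in every conversation) appears to match the CMU
Document Grounded Conversations (CMU_DoG) release. Consecutive turns by the
same speaker are merged, speaker roles are normalized so every dialog starts
with user1, and the grounding wiki document is attached under "knowledge".
"""
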
from utils import parse, write_jsonl_file, read_json_file
import os


def build_wiki_index(wiki_dir):
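    """Map each wiki document's "wikiDocumentIdx" to the JSON file that holds it."""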
    idx2path = dict()

    for wiki_filename in os.listdir(wiki_dir):
        wiki_path = os.path.join(wiki_dir, wiki_filename)
        wiki_data = read_json_file(wiki_path)

        wiki_idx = wiki_data["wikiDocumentIdx"]
        idx2path[wiki_idx] = wiki_path

    return idx2path


def merge_consecutive_turns(history):
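    """Collapse consecutive turns by the same speaker into a single turn.

    Texts are concatenated and each merged turn's "docIdx" becomes the list
    of distinct document-section indices it was grounded in, e.g. (with
    hypothetical turns):

        [{"uid": "user1", "text": "Hi.", "docIdx": 0},
         {"uid": "user1", "text": "I love this movie.", "docIdx": 1}]
        -> [{"uid": "user1", "text": "Hi. I love this movie.", "docIdx": [0, 1]}]
    """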
    new_history = []
    cur_turn = None
    for turn in history:
        if cur_turn is None:
            cur_turn = turn
            cur_turn["docIdx"] = [cur_turn["docIdx"]]
        else:
            if cur_turn["uid"] == turn["uid"]:
                cur_turn["text"] += " " + turn["text"].strip()
                if turn["docIdx"] not in cur_turn["docIdx"]:
                    cur_turn["docIdx"].append(turn["docIdx"])
            else:
                new_history.append(cur_turn)
                cur_turn = turn
                cur_turn["docIdx"] = [cur_turn["docIdx"]]
    if cur_turn is not None:
        new_history.append(cur_turn)

    return new_history


def load_train_files(args):
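    """Return the conversation filenames that make up the train split."""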
    conv_dir = os.path.join(args.input_dir, "Conversations")
    input_dir = os.path.join(conv_dir, "train")
    filenames = os.listdir(input_dir)

    return filenames


def preprocess(args, split, train_files=None):
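    """Convert one conversation split to `<output_dir>/<split>.jsonl`.

    When `train_files` is given, conversations whose filenames also appear
    in the train split are skipped so they are not duplicated in valid/test.
    """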
    wiki_dir = os.path.join(args.input_dir, "WikiData")
    conv_dir = os.path.join(args.input_dir, "Conversations")

    idx2path = build_wiki_index(wiki_dir)
    input_dir = os.path.join(conv_dir, split)

    # The source split directory is named "valid"; write it out as "dev".
    if split == "valid":
        split = "dev"
    output_file = os.path.join(args.output_dir, f"{split}.jsonl")

    processed_data = []

    def flip_role(role):
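        """Swap user1 <-> user2 so the first speaker is always labeled user1."""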
        if role == "user1":
            return "user2"
        elif role == "user2":
            return "user1"
        else:
            raise ValueError(f"Unknown role: {role}")

    for filename in os.listdir(input_dir):
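        # Skip conversations that already appear in the train split.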
        if train_files is not None and filename in train_files:
            continue
        filepath = os.path.join(input_dir, filename)
        data = read_json_file(filepath)
        history = data["history"]

        history = merge_consecutive_turns(history)

        dialog = {"turn": "multi", "locale": "en", "dialog": []}

        wikipath = idx2path[data["wikiDocumentIdx"]]
        wikidata = read_json_file(wikipath)

        # If the conversation opens with user2, flip every role label so the
        # dialog always starts with user1.
        flip = False
        for idx, turn in enumerate(history):
            if idx == 0 and turn["uid"] == "user2":
                flip = True
            dialog["dialog"].append(
                {
                    "roles": [turn["uid"]] if not flip else [flip_role(turn["uid"])],
                    "utterance": turn["text"],
                    "knowledge_to_select": list(map(str, turn["docIdx"])),
                }
            )
        dialog["knowledge"] = {"type": "dict", "value": wikidata}

        processed_data.append(dialog)

    write_jsonl_file(processed_data, output_file)


if __name__ == "__main__":
    args = parse()
    train_files = load_train_files(args)
    # Train is written in full; valid/test drop conversations that also
    # appear in the train split.
    preprocess(args, "train")
    preprocess(args, "valid", train_files)
    preprocess(args, "test", train_files)
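
# A minimal usage sketch, assuming utils.parse() defines --input_dir and
# --output_dir flags (only args.input_dir and args.output_dir are read above;
# the exact flag names are an assumption):
#
#   python <this script> --input_dir /path/to/corpus --output_dir data/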