# ################################
# Model: GPT2LMHeadModel + NLL
# Authors:
#   Pooneh Mousavi 2023
# ################################


# HuggingFace Hub identifier for the GPT-2 model
gpt_hub: gpt2
gpt_folder: gpt2_result/save/gpt_checkpoint
# Special tokens
bos_token: "BOS"
eos_token: "EOS"

system_token: "SPK_1"
user_token: "SPK_2"
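
# Illustration (not part of the original file): with these tokens, a dialogue
# history is presumably serialized along the lines of
#   BOS SPK_1 <system turn> SPK_2 <user turn> ... EOS
# with SPK_1/SPK_2 marking the speaker of each turn; the exact layout is
# defined by the accompanying data-preparation code.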

tokenizer: !ref <gpt_hub>

additional_special_tokens: [
    !ref <system_token>,
    !ref <user_token>
]

special_tokens: [
    !ref <bos_token>,
    !ref <eos_token>,
    !ref <system_token>,
    !ref <user_token>
]

attr_to_special_tokens:
    "bos_token": !ref <bos_token>
    "eos_token": !ref <eos_token>
    "additional_special_tokens": !ref <additional_special_tokens>

# History window, i.e. how many user-system exchanges are considered as context.
max_history: 5
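
# Example: with max_history: 5, only the 5 most recent user-system exchanges
# (up to 10 turns) are kept in the prompt; older turns are dropped, which also
# helps stay within GPT-2's 1024-token context window.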

# Decoding settings
freeze_gptmodel: True
num_beams: 3
max_new_tokens: 50
top_k: 45
top_p: 0.9
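
# Rough reading of the decoding parameters (how they combine depends on the
# generate() call inside custom.HuggingFaceGPT_expanded): num_beams: 3 keeps 3
# beam-search hypotheses; top_k / top_p, when sampling is enabled, restrict
# candidates to the 45 most likely tokens within a 0.9 cumulative-probability
# nucleus; max_new_tokens caps the generated reply at 50 tokens.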

# gpt model
model: !new:custom.HuggingFaceGPT_expanded
    source: !ref <gpt_hub>
    freeze: !ref <freeze_gptmodel>
    save_path: !ref <gpt_folder>
    max_new_tokens: !ref <max_new_tokens>
    num_beams: !ref <num_beams>
    top_k: !ref <top_k>
    top_p: !ref <top_p>
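
# custom.HuggingFaceGPT_expanded is assumed to be defined in a custom.py file
# next to this YAML (the !new: tag instantiates a class by its dotted import
# path); it presumably wraps a HuggingFace GPT-2 model, caches downloads under
# save_path, and forwards the decoding arguments above to generate().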
# Masks
padding_mask: !name:speechbrain.lobes.models.transformer.Transformer.get_key_padding_mask
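
# get_key_padding_mask(padded_input, pad_idx) returns a boolean mask that is
# True at padded positions so attention can ignore them; the !name: tag stores
# the function itself, uncalled, for the training script to apply per batch.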

pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
    loadables:
        model: !ref <model>
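
# The Pretrainer transfers pretrained parameters into <model> before training.
# A typical SpeechBrain driver would run something like
#   pretrainer.collect_files()
#   pretrainer.load_collected()
# (sketch of the usual pattern; the actual call sites are in the train script).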

modules:
    model: !ref <model>
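
# The modules dict is what a SpeechBrain Brain subclass exposes as
# self.modules (and moves to the right device); here it contains only the GPT
# model.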