IlyaGusev committed
Commit
95876e3
1 Parent(s): bd9bd5f

V2 initial

adapter_config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "base_model_name_or_path": "decapoda-research/llama-7b-hf",
+  "base_model_name_or_path": "huggyllama/llama-7b",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1929a4ec5b10b9a4fb61f34d18882205b89df8270444a035a19e00ce3673e19f
+oid sha256:49c13a07e164ad6b0bbb8afe3cbe14c1664d729bc63bcf0d00b9c2a6b2392d04
 size 67201357
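Only the Git LFS pointer changed: same size, new sha256. A quick way to confirm a downloaded adapter_model.bin matches the new pointer is to hash it locally (the file path here is an assumption):

```python
# Verify a downloaded adapter_model.bin against the oid/size in the
# new LFS pointer. The local path is a hypothetical example.
import hashlib
import os

EXPECTED_OID = "49c13a07e164ad6b0bbb8afe3cbe14c1664d729bc63bcf0d00b9c2a6b2392d04"
EXPECTED_SIZE = 67201357
path = "adapter_model.bin"  # assumed local path

assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("adapter_model.bin matches the LFS pointer")
```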
special_tokens_map.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "bos_token": "<s>",
-  "eos_token": "</s>",
-  "pad_token": "<unk>",
-  "sep_token": "<s>",
-  "unk_token": "<unk>"
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "pad_token": "<unk>",
+  "sep_token": "<s>",
+  "unk_token": "<unk>"
 }
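The visible keys and values are unchanged here, so this is likely a formatting-only rewrite of the file. The settings themselves still matter: padding with "<unk>" combines with the left padding configured in tokenizer_config.json for batched generation with a decoder-only model. A short sketch (the tokenizer repo id is an assumption):

```python
# Sketch: pad_token="<unk>" plus padding_side="left" for batched
# generation. Decoder-only LLaMA models continue from the right end
# of the prompt, so shorter prompts are padded on the left.
from transformers import LlamaTokenizer

tok = LlamaTokenizer.from_pretrained("huggyllama/llama-7b")  # assumed repo id
tok.pad_token = tok.unk_token  # mirrors special_tokens_map.json
tok.padding_side = "left"      # mirrors tokenizer_config.json
batch = tok(["Hi!", "How are you doing today?"], padding=True, return_tensors="pt")
print(batch["input_ids"].shape)  # both prompts padded to the same length
```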
tokenizer_config.json CHANGED
@@ -1,10 +1,10 @@
 {
-  "bos_token": "<s>",
-  "clean_up_tokenization_spaces": false,
-  "eos_token": "</s>",
-  "model_max_length": 2048,
-  "padding_side": "left",
-  "special_tokens_map_file": "special_tokens_map.json",
-  "tokenizer_class": "LlamaTokenizer",
-  "unk_token": "<unk>"
+  "tokenizer_class": "LlamaTokenizer",
+  "model_max_length": 2048,
+  "padding_side": "left",
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "unk_token": "<unk>",
+  "clean_up_tokenization_spaces": false,
+  "special_tokens_map_file": "special_tokens_map.json"
 }
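Only the key order changes here; JSON objects are unordered, so the parsed configuration is identical. A quick check illustrating this:

```python
# Key order is irrelevant to the parsed config: both orderings
# deserialize to the same dict.
import json

old = '{"bos_token": "<s>", "model_max_length": 2048, "padding_side": "left"}'
new = '{"model_max_length": 2048, "padding_side": "left", "bos_token": "<s>"}'
assert json.loads(old) == json.loads(new)
```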
training_config.json CHANGED
@@ -27,7 +27,7 @@
   "load_in_8bit": true,
   "only_target_loss": true,
   "mode": "chat",
-  "templates_path": "ru_saiga_template.json",
+  "templates_path": "internal_prompts/saiga_v2.json",
   "model_name": "models/llama-7b",
   "model_type": "causal",
   "max_tokens_count": 2000