sharpenb committed on
Commit e14fcff · verified · 1 parent: f8353a0

Upload folder using huggingface_hub (#2)


- c95272f7bcde11cf9eca87ee52fe987d5ba2e4a4c0198ffdf15c015967698b52 (1387583cf7134886172b984c72bbae9a19a60fa1)
- a7dc08bfc8f89b6f46aa54d61dfa7df151b7a31000e0af583dd207fb524151c3 (61d887edba85f86b3f30883f121ff626d2061cae)
- fa10a81a739f977cb477c10469225ce7aac7dfd88f2f3a3bd5bbc7107a213123 (eb1ea0bb2fe5119ff81d7df691568dede80ff749)

Files changed (2):
  1. config.json +1 -1
  2. smash_config.json +1 -1
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/covalent/.cache/models/tmp9agve87rhk1qdeqe",
+  "_name_or_path": "/covalent/.cache/models/tmpnjnmpexwtgv_h_vq",
   "architectures": [
     "LlamaForCausalLM"
   ],
smash_config.json CHANGED
@@ -28,7 +28,7 @@
   "quant_llm-int8_weight_bits": 8,
   "max_batch_size": 1,
   "device": "cuda",
-  "cache_dir": "/covalent/.cache/models/tmp9agve87r",
+  "cache_dir": "/covalent/.cache/models/tmpnjnmpexw",
   "task": "",
   "save_load_fn": "bitsandbytes",
   "save_load_fn_args": {}