KaeriJenti committed on
Commit a71554e · 1 Parent(s): ecd0d06

model upload

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. config.json +44 -0
  2. configuration_qwen.py +71 -0
  3. cpp_kernels.py +55 -0
  4. generation_config.json +15 -0
  5. model-00001-of-00162.safetensors +3 -0
  6. model-00002-of-00162.safetensors +3 -0
  7. model-00003-of-00162.safetensors +3 -0
  8. model-00004-of-00162.safetensors +3 -0
  9. model-00005-of-00162.safetensors +3 -0
  10. model-00006-of-00162.safetensors +3 -0
  11. model-00007-of-00162.safetensors +3 -0
  12. model-00008-of-00162.safetensors +3 -0
  13. model-00009-of-00162.safetensors +3 -0
  14. model-00010-of-00162.safetensors +3 -0
  15. model-00011-of-00162.safetensors +3 -0
  16. model-00012-of-00162.safetensors +3 -0
  17. model-00013-of-00162.safetensors +3 -0
  18. model-00014-of-00162.safetensors +3 -0
  19. model-00015-of-00162.safetensors +3 -0
  20. model-00016-of-00162.safetensors +3 -0
  21. model-00017-of-00162.safetensors +3 -0
  22. model-00018-of-00162.safetensors +3 -0
  23. model-00019-of-00162.safetensors +3 -0
  24. model-00020-of-00162.safetensors +3 -0
  25. model-00021-of-00162.safetensors +3 -0
  26. model-00022-of-00162.safetensors +3 -0
  27. model-00023-of-00162.safetensors +3 -0
  28. model-00024-of-00162.safetensors +3 -0
  29. model-00025-of-00162.safetensors +3 -0
  30. model-00026-of-00162.safetensors +3 -0
  31. model-00027-of-00162.safetensors +3 -0
  32. model-00028-of-00162.safetensors +3 -0
  33. model-00029-of-00162.safetensors +3 -0
  34. model-00030-of-00162.safetensors +3 -0
  35. model-00031-of-00162.safetensors +3 -0
  36. model-00032-of-00162.safetensors +3 -0
  37. model-00033-of-00162.safetensors +3 -0
  38. model-00034-of-00162.safetensors +3 -0
  39. model-00035-of-00162.safetensors +3 -0
  40. model-00036-of-00162.safetensors +3 -0
  41. model-00037-of-00162.safetensors +3 -0
  42. model-00038-of-00162.safetensors +3 -0
  43. model-00039-of-00162.safetensors +3 -0
  44. model-00040-of-00162.safetensors +3 -0
  45. model-00041-of-00162.safetensors +3 -0
  46. model-00042-of-00162.safetensors +3 -0
  47. model-00043-of-00162.safetensors +3 -0
  48. model-00044-of-00162.safetensors +3 -0
  49. model-00045-of-00162.safetensors +3 -0
  50. model-00046-of-00162.safetensors +3 -0
config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "_name_or_path": "/data04/ysd4/llama/Qwen-72B",
+   "architectures": [
+     "QWenLMHeadModel"
+   ],
+   "attn_dropout_prob": 0.0,
+   "auto_map": {
+     "AutoConfig": "configuration_qwen.QWenConfig",
+     "AutoModel": "modeling_qwen.QWenLMHeadModel",
+     "AutoModelForCausalLM": "modeling_qwen.QWenLMHeadModel"
+   },
+   "bf16": false,
+   "emb_dropout_prob": 0.0,
+   "fp16": true,
+   "fp32": false,
+   "hidden_size": 8192,
+   "initializer_range": 0.02,
+   "intermediate_size": 49152,
+   "kv_channels": 128,
+   "layer_norm_epsilon": 1e-06,
+   "max_position_embeddings": 32768,
+   "model_type": "qwen",
+   "no_bias": true,
+   "num_attention_heads": 64,
+   "num_hidden_layers": 80,
+   "onnx_safe": null,
+   "rope_theta": 1000000,
+   "rotary_emb_base": 1000000,
+   "rotary_pct": 1.0,
+   "scale_attn_weights": true,
+   "seq_length": 32768,
+   "softmax_in_fp32": false,
+   "tie_word_embeddings": false,
+   "tokenizer_class": "QWenTokenizer",
+   "torch_dtype": "float16",
+   "transformers_version": "4.34.1",
+   "use_cache": true,
+   "use_cache_kernel": false,
+   "use_cache_quantization": false,
+   "use_dynamic_ntk": false,
+   "use_flash_attn": true,
+   "use_logn_attn": false,
+   "vocab_size": 152064
+ }
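The config above registers the repo's custom model code through auto_map, so loading it with transformers requires trust_remote_code=True. A minimal loading sketch, assuming the checkpoint files have been downloaded to a local directory (the path below is a placeholder, not part of this repo):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder path: point this at wherever the repo files were downloaded.
model_path = "./Qwen-72B"

# trust_remote_code is required because config.json maps the model classes
# to configuration_qwen.py / modeling_qwen.py shipped alongside the weights.
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    trust_remote_code=True,
    torch_dtype=torch.float16,  # matches "torch_dtype": "float16" / "fp16": true
    device_map="auto",          # shard the 162 safetensors files across available GPUs
)
```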
configuration_qwen.py ADDED
@@ -0,0 +1,71 @@
+ # Copyright (c) Alibaba Cloud.
+ #
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ from transformers import PretrainedConfig
+
+
+ class QWenConfig(PretrainedConfig):
+     model_type = "qwen"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=151936,
+         hidden_size=4096,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         emb_dropout_prob=0.0,
+         attn_dropout_prob=0.0,
+         layer_norm_epsilon=1e-6,
+         initializer_range=0.02,
+         max_position_embeddings=8192,
+         scale_attn_weights=True,
+         use_cache=True,
+         bf16=False,
+         fp16=False,
+         fp32=False,
+         kv_channels=128,
+         rotary_pct=1.0,
+         rotary_emb_base=10000,
+         use_dynamic_ntk=True,
+         use_logn_attn=True,
+         use_flash_attn="auto",
+         intermediate_size=22016,
+         no_bias=True,
+         tie_word_embeddings=False,
+         use_cache_quantization=False,
+         use_cache_kernel=False,
+         softmax_in_fp32=False,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.emb_dropout_prob = emb_dropout_prob
+         self.attn_dropout_prob = attn_dropout_prob
+         self.layer_norm_epsilon = layer_norm_epsilon
+         self.initializer_range = initializer_range
+         self.scale_attn_weights = scale_attn_weights
+         self.use_cache = use_cache
+         self.max_position_embeddings = max_position_embeddings
+         self.bf16 = bf16
+         self.fp16 = fp16
+         self.fp32 = fp32
+         self.kv_channels = kv_channels
+         self.rotary_pct = rotary_pct
+         self.rotary_emb_base = rotary_emb_base
+         self.use_dynamic_ntk = use_dynamic_ntk
+         self.use_logn_attn = use_logn_attn
+         self.use_flash_attn = use_flash_attn
+         self.no_bias = no_bias
+         self.use_cache_quantization = use_cache_quantization
+         self.use_cache_kernel = use_cache_kernel
+         self.softmax_in_fp32 = softmax_in_fp32
+         super().__init__(
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs
+         )
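The defaults in QWenConfig describe a smaller Qwen variant; the 72B checkpoint in this commit overrides them through config.json. A small sketch of how the two relate, assuming configuration_qwen.py is on the import path (override values copied from the config.json above):

```python
from configuration_qwen import QWenConfig

# Defaults as defined in the class above.
default_cfg = QWenConfig()
print(default_cfg.hidden_size, default_cfg.num_hidden_layers)    # 4096 32

# Overrides matching config.json in this commit (72B-sized model).
qwen_72b_cfg = QWenConfig(
    vocab_size=152064,
    hidden_size=8192,
    num_hidden_layers=80,
    num_attention_heads=64,
    intermediate_size=49152,
    max_position_embeddings=32768,
    rotary_emb_base=1000000,
    use_dynamic_ntk=False,
    use_logn_attn=False,
    fp16=True,
)
print(qwen_72b_cfg.hidden_size, qwen_72b_cfg.num_hidden_layers)  # 8192 80
```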
cpp_kernels.py ADDED
@@ -0,0 +1,55 @@
+ from torch.utils import cpp_extension
+ import pathlib
+ import os
+ import subprocess
+
+ def _get_cuda_bare_metal_version(cuda_dir):
+     raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"],
+                                          universal_newlines=True)
+     output = raw_output.split()
+     release_idx = output.index("release") + 1
+     release = output[release_idx].split(".")
+     bare_metal_major = release[0]
+     bare_metal_minor = release[1][0]
+
+     return raw_output, bare_metal_major, bare_metal_minor
+
+ def _create_build_dir(buildpath):
+     try:
+         os.mkdir(buildpath)
+     except OSError:
+         if not os.path.isdir(buildpath):
+             print(f"Creation of the build directory {buildpath} failed")
+
+ # Check if cuda 11 is installed for compute capability 8.0
+ cc_flag = []
+ _, bare_metal_major, bare_metal_minor = _get_cuda_bare_metal_version(cpp_extension.CUDA_HOME)
+ if int(bare_metal_major) >= 11:
+     cc_flag.append('-gencode')
+     cc_flag.append('arch=compute_80,code=sm_80')
+     if int(bare_metal_minor) >= 7:
+         cc_flag.append('-gencode')
+         cc_flag.append('arch=compute_90,code=sm_90')
+
+ # Build path
+ srcpath = pathlib.Path(__file__).parent.absolute()
+ buildpath = srcpath / 'build'
+ _create_build_dir(buildpath)
+
+ def _cpp_extention_load_helper(name, sources, extra_cuda_flags):
+     return cpp_extension.load(
+         name=name,
+         sources=sources,
+         build_directory=buildpath,
+         extra_cflags=['-O3', ],
+         extra_cuda_cflags=['-O3',
+                            '-gencode', 'arch=compute_70,code=sm_70',
+                            '--use_fast_math'] + extra_cuda_flags + cc_flag,
+         verbose=1
+     )
+
+ extra_flags = []
+
+ cache_autogptq_cuda_256_sources = ["./cache_autogptq_cuda_256.cpp",
+                                    "./cache_autogptq_cuda_kernel_256.cu"]
+ cache_autogptq_cuda_256 = _cpp_extention_load_helper("cache_autogptq_cuda_256", cache_autogptq_cuda_256_sources, extra_flags)
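cpp_kernels.py compiles the optional KV-cache quantization extension at import time via torch's JIT C++ extension loader, and the -gencode flags it adds depend on the installed CUDA release. A self-contained sketch of that version-gating logic, run against a hypothetical `nvcc -V` output string so it does not require a CUDA toolkit:

```python
# Hypothetical nvcc -V output; the real script runs nvcc from CUDA_HOME.
sample_nvcc_output = (
    "nvcc: NVIDIA (R) Cuda compiler driver\n"
    "Cuda compilation tools, release 11.8, V11.8.89\n"
)

# Same parsing as _get_cuda_bare_metal_version: find the token after "release".
tokens = sample_nvcc_output.split()
release = tokens[tokens.index("release") + 1].split(".")  # e.g. ["11", "8,"]
major, minor = release[0], release[1][0]

# Same gating as cpp_kernels.py: sm_80 needs CUDA >= 11, sm_90 needs >= 11.7.
cc_flag = []
if int(major) >= 11:
    cc_flag += ["-gencode", "arch=compute_80,code=sm_80"]
    if int(minor) >= 7:
        cc_flag += ["-gencode", "arch=compute_90,code=sm_90"]
print(cc_flag)
```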
generation_config.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "chat_format": "raw",
+   "do_sample": true,
+   "eos_token_id": 151643,
+   "max_new_tokens": 512,
+   "pad_token_id": 151643,
+   "stop_words_ids": [
+     [
+       151643
+     ]
+   ],
+   "top_k": 0,
+   "top_p": 0.8,
+   "transformers_version": "4.34.1"
+ }
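generation_config.json sets sampling defaults for the base model ("raw" chat format): nucleus sampling with top_p 0.8, top_k disabled, and token 151643 used as both EOS and padding. A hedged sketch of applying the same settings through transformers, assuming `model` and `tokenizer` were loaded as in the earlier sketch; stop_words_ids appears to be a Qwen-specific field consumed by the repo's custom generation code, so it is omitted here:

```python
from transformers import GenerationConfig

# Values copied from generation_config.json above.
gen_config = GenerationConfig(
    do_sample=True,
    top_k=0,
    top_p=0.8,
    max_new_tokens=512,
    eos_token_id=151643,
    pad_token_id=151643,
)

prompt = "The capital of France is"  # placeholder continuation prompt for a base (non-chat) model
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, generation_config=gen_config)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```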
model-00001-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87c2c0cbc5c661fbf926a59dc4f61c22e8241b8fa610b13e5a6338efa8add2cd
+ size 2491416712
model-00002-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe141c7efd46bfb1e54f7819771a1fbada6c4feab1c154564c292e930bb1983f
+ size 939606672
model-00003-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b00be35ad7547ba722a6dfe148df3abeda6c7e39e8ce55dbb12e0335c92cd51f
+ size 805323104
model-00004-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:29eecfa6621b7d6b8509e72fd59929ba0d28b91cd8682902d36ce3ad9da7153d
+ size 939590192
model-00005-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:383dd555f9cf515c774f79fdae93c95a8cea0b194e277572f55baaae9db36fff
+ size 805323104
model-00006-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1046a9861391c9ea878db11bf54507eb197b50cd9b6338f1e0dfce6be6cf6ef0
+ size 939590192
model-00007-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:301d90da63af5a6aac8f0d4055d0d79281ed8b887918aa0a3e4b8dc6c799ad28
+ size 805323104
model-00008-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:004ac816039458ea54753219cd3301ee8e5be8d3f4d401912bcad29d42d8c6f2
+ size 939590192
model-00009-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ebe03d8e7e6a42fbdcb714f20baba67c8a85d46d4e4b9405c50a653e887454d
+ size 805323104
model-00010-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b26df13b1119abb204c69515929e955f748e24fcfc2fde3d3f72e9f5273ae73
+ size 939590192
model-00011-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a1312c0d69d65a6bcaaa19a3e1143260f3927b2ace4baf0ed0dc20ce563a7a4
+ size 805323104
model-00012-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ca873a5fc42bc29d62843869a17e40b4435f82cbb63eb7c4b5e79a8f86e3701
+ size 939590192
model-00013-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf2ee53bf11b9a7a820a4440bdcc97dbfeb82dc7b8c4cbe1fb856d032907f687
+ size 805323104
model-00014-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb1e4b4b235af2131a1f272d0fa1fbdd6b4b6069d0e01881405cea6f3b45dd81
+ size 939590192
model-00015-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03a076df233722df0884a9cfc0a892d65c1f7ce8d2be515449897bb587f2f689
+ size 805323104
model-00016-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b067e38235385662bf563f7e545a632ab1bbd849ab6b020d293888f13b09bbcc
+ size 939590192
model-00017-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca09d7cddf9b2aec0e1c122182b9bf595299c54f8b0d9457574e486e3c199bb9
+ size 805323104
model-00018-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a435728bbe8b3314c48e7178e753c13e4a0d4db2b5b0c7e76d52015352af27a
+ size 939590192
model-00019-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d1f322d92ee837645ea2fbe121ef7272f8b68f6cf51c7b35251507959c34aad
+ size 805323104
model-00020-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63fa8f6ea9f88d2f390ed01d2d93432b91c69cd058b15fa46de2a079a0c668b8
+ size 939590192
model-00021-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc615249b8c6df9ec0c5b82069401deb1487fb782869fdae91ae5699df826eb0
+ size 805323096
model-00022-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08a4de09bb9d125ad6f3c67acb6926340328082627418d9afcc6830f23a4361c
+ size 939590192
model-00023-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a62dbe2203990155c7ec3e9b4a87105dc0c8c7addddd299f6485e48bc5bbc733
+ size 805323104
model-00024-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:747584b0cc78f98e8ff398518846de5c4404786d0ec4b9b47a7509bf068e9596
+ size 939590192
model-00025-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d466e745c2a64206602e4a3900275e2bf167db4fcea1d7c43bdfe388ece400b
+ size 805323104
model-00026-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:858f7a96c503539fb1262164d10106d3cb5419177f05c23c3c061f3614391490
+ size 939590192
model-00027-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:70fd1cce68239ce06a5e71497e460f0567df62be2a943a11b3996445a04bd68a
+ size 805323104
model-00028-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c86d3e1d2f7ff82f32a23c666cf90b74b77c943aee759a4d3b5792fc1c0cb22d
+ size 939590192
model-00029-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b5bc2a4e707521b4902c561e43f05cfdf32cb4bf80b4f4a0ff2ebbf51063964
+ size 805323104
model-00030-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5d76c01d27fb6c77b3dfe368a6a3408e95c4cb5abb3ebc0481a2d02214c08f2
+ size 939590192
model-00031-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8c7cd12f8faefc2e7177e9e039edc0eb0e1bf463c64e9536fc8a57b3aa1e655
+ size 805323104
model-00032-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2dcb13b7b355cff9100aad1b86e322aa65582d18f086b66d05e0400e5709f5d7
+ size 939590192
model-00033-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:605670bc7373c9d73e83916c32cdb024a7406082cad20b2c0a153f722a56fe76
+ size 805323104
model-00034-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a920b96946b2e984941c17db460a14c560718ba6124bb61c4eda31456df98d1
+ size 939590192
model-00035-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf4ff7ac3e1ffbbe4cf9bd607ca5731f35ec42a909930a42d85f91aaa14be7bb
+ size 805323104
model-00036-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c83da519cee9de7d14ff1ebacc885e735e0ab9e7e588105cb4d9db3554cdc31c
+ size 939590192
model-00037-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af786946b2cd84b084d545bfc14e8000d5fee9f29af9d3589dbb94420c180901
+ size 805323104
model-00038-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32b7a24693ca1457d48f696e11c35caecfc335ba66e45e736633a22bcac12dc2
+ size 939590192
model-00039-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e8b3352588fb17fed9fff827f3b5812393ac79a80f1e320c1953c11c93aaebf6
+ size 805323104
model-00040-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d1df49b1c471c9e0e39564b59b9d9188e5f97e46961cb7eff3f2c00dfbb3de96
+ size 939590192
model-00041-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca2483206e26d077cdb44b3dbfb6efd2bf2c5ed27d6a258e2f0a45080279330b
+ size 805323104
model-00042-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8441e90341c1229cc7d835ce364af103fd830a21ef1416db4a791f6c1734c228
+ size 939590192
model-00043-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:96b0f22f953c5c4b9c3b10ebedc1298dac62a37e6afdb953ea92c8e1f17fec83
+ size 805323104
model-00044-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17f18203b36d2027e837b126308d8adb7ea9c489c8e5c1f8a748c0cf4a2ad47c
+ size 939590192
model-00045-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:262c0f911495a9b4451a10731a6cd4b3978d629e2022e6ee12a68a7b71dfde39
+ size 805323104
model-00046-of-00162.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6da1d99db87a56b3cf225af665a8dd83b011dd4dab236468d416e82ae6e46c5c
+ size 939590192