nehcgs committed
Commit aa4d8e7
1 Parent(s): 4838f72

Upload folder using huggingface_hub

README.md CHANGED
@@ -2,16 +2,16 @@
 license: other
 license_name: katanemo-research
 license_link: >-
-  https://huggingface.co/katanemolabs/Arch-Function-1.5B/blob/main/LICENSE
+  https://huggingface.co/katanemolabs/Arch-Function-7B/blob/main/LICENSE
 base_model:
-- Qwen/Qwen2.5-1.5B-Instruct
+- Qwen/Qwen2.5-Coder-7B-Instruct
 language:
 - en
 pipeline_tag: text-generation
 library_name: transformers
 ---
 
-# katanemo/Arch-Function-1.5B
+# katanemo/Arch-Function-7B
 
 ## Overview
 The Katanemo Arch-Function collection of large language models (LLMs) is a collection of state-of-the-art (SOTA) LLMs specifically designed for **function calling** tasks. The models are designed to understand complex function signatures, identify required parameters, and produce accurate function call outputs based on natural language prompts. Achieving performance on par with GPT-4, these models set a new benchmark in the domain of function-oriented tasks, making them suitable for scenarios where automated API interaction and function execution are crucial.
@@ -189,7 +189,7 @@ We evaluate Katanemo Arch-Function series on the [Berkeley Function-Calling Lead
 
 
 # Requirements
-The code of Arch-Function-1.5B is supported in the Hugging Face `transformers` library, and we advise you to install the latest version:
+The code of Arch-Function-7B is supported in the Hugging Face `transformers` library, and we advise you to install the latest version:
 ```bash
 pip install "transformers>=4.37.0"
 ```
@@ -205,7 +205,7 @@ import json
 from typing import Any, Dict, List
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-model_name = "katanemo/Arch-Function-1.5B"
+model_name = "katanemo/Arch-Function-7B"
 model = AutoModelForCausalLM.from_pretrained(
     model_name, device_map="auto", torch_dtype="auto", trust_remote_code=True
 )
@@ -344,4 +344,4 @@ The current temperature in Seattle is 62 degrees in Fahrenheit.
 
 
 # License
-The Katanemo Arch-Function collection is distributed under the [Katanemo license](https://huggingface.co/katanemolabs/Arch-Function-1.5B/blob/main/LICENSE).
+The Katanemo Arch-Function collection is distributed under the [Katanemo license](https://huggingface.co/katanemolabs/Arch-Function-7B/blob/main/LICENSE).
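For context on the README hunks above: the repo id and `from_pretrained` arguments come straight from the diff, while the prompt and the `generate()` call below are illustrative assumptions, not the card's exact example (the card wraps generation in a function-calling prompt format). A minimal sketch of the updated quickstart:

```python
# Sketch of the updated quickstart. Repo id and loading arguments are taken
# from the diff; the chat messages and generate() call are assumptions.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "katanemo/Arch-Function-7B"
model = AutoModelForCausalLM.from_pretrained(
    model_name, device_map="auto", torch_dtype="auto", trust_remote_code=True
)
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

# Plain chat-template generation as a smoke test.
messages = [{"role": "user", "content": "What is the weather in Seattle?"}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
outputs = model.generate(inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```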
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "Qwen/Qwen2.5-7B-Instruct",
+  "_name_or_path": "Qwen/Qwen2.5-Coder-7B-Instruct",
   "architectures": [
     "Qwen2ForCausalLM"
   ],
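The only change here is the `_name_or_path` provenance field, recording the base checkpoint the weights came from. A quick way to read what the repo's config actually stores, sketched with `hf_hub_download` (the repo id is the one used in the card's snippet; `transformers` overwrites `_name_or_path` at load time, so the raw file is read instead):

```python
import json
from huggingface_hub import hf_hub_download

# Fetches only config.json, not the weight shards.
path = hf_hub_download("katanemo/Arch-Function-7B", "config.json")
with open(path) as f:
    cfg = json.load(f)
print(cfg["_name_or_path"])  # "Qwen/Qwen2.5-Coder-7B-Instruct" after this commit
print(cfg["architectures"])  # ["Qwen2ForCausalLM"], unchanged
```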
generation_config.json CHANGED
@@ -6,7 +6,7 @@
     151643
   ],
   "pad_token_id": 151643,
-  "repetition_penalty": 1.05,
+  "repetition_penalty": 1.1,
   "temperature": 0.7,
   "top_k": 20,
   "top_p": 0.8,
model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c4a3225f3bd8bd745f9e3bcf9d3f85740fa2e0f4f7ef128a34cb7e600b23d696
+oid sha256:d8abb832df26607133070162af8b38bde0b957721ab2f0ae4a0bec3a9b1a034c
 size 4877660776
model-00002-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b702310b1513c2e44d589060678b27ad1c5c86b2084da2e94a86a2de44b63567
+oid sha256:b01fada6c3fcd7787462e62cbbbb847e5868540176bd373743c2c0c4b8f248cf
 size 4932751008
model-00003-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:235c7ff384c0a922e7dfc469f04f513d9f973a79040e2c91cfa24ec9d1c8ffe5
+oid sha256:557ad1e1ef465fd0c38914b6f302f834b55f31379a812d8372b7970340ce3cbf
 size 4330865200
model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a8327b8350d03d01aeee592f961468bcee38a343ce658071f09ccb17f76ea36b
+oid sha256:f5b8d144bc96bff9ad28b710421d34accc6461c5b5ce479c47ab26a429d9969b
 size 1089994880
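The four shard entries above are Git LFS pointer files: the sizes are unchanged, only the sha256 oids change for the retrained weights. A sketch of how one could verify a locally downloaded shard against the oid in its pointer (file name and oid copied from the first shard above; assumes the shard sits in the working directory):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in 1 MiB chunks to avoid loading ~5 GB into memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "d8abb832df26607133070162af8b38bde0b957721ab2f0ae4a0bec3a9b1a034c"
assert sha256_of("model-00001-of-00004.safetensors") == expected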
tokenizer_config.json CHANGED
@@ -199,9 +199,9 @@
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|im_end|>",
   "errors": "replace",
-  "model_max_length": 131072,
+  "model_max_length": 32768,
   "pad_token": "<|endoftext|>",
   "split_special_tokens": false,
   "tokenizer_class": "Qwen2Tokenizer",
   "unk_token": null
-}
+}
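`model_max_length` drops here from 131072 to 32768; it is the ceiling the tokenizer applies when asked to truncate without an explicit `max_length`. A sketch of where the value shows up in practice (repo id as in the card's snippet; the prompt is a placeholder):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("katanemo/Arch-Function-7B")
print(tokenizer.model_max_length)  # 32768 after this commit

# With truncation=True and no explicit max_length, inputs are clipped
# to model_max_length tokens.
ids = tokenizer("some very long prompt ...", truncation=True)["input_ids"]
assert len(ids) <= tokenizer.model_max_length
```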