|
---
|
|
pipeline_tag: text-generation
|
|
inference: false
|
|
license: apache-2.0
|
|
datasets:
|
|
- codeparrot/github-code-clean
|
|
- bigcode/starcoderdata
|
|
|
|
|
|
- open-web-math/open-web-math
|
|
- math-ai/StackMathQA
|
|
|
|
|
|
|
|
metrics:
|
|
- code_eval
|
|
library_name: transformers
|
|
tags:
|
|
- code
|
|
- granite
|
|
model-index:
|
|
- name: granite-3b-code-base
|
|
results:
|
|
- task:
|
|
type: text-generation
|
|
dataset:
|
|
type: mbpp
|
|
name: MBPP
|
|
metrics:
|
|
- name: pass@1
|
|
type: pass@1
|
|
value: 36.0
|
|
verified: false
|
|
- task:
|
|
type: text-generation
|
|
dataset:
|
|
type: evalplus/mbppplus
|
|
name: MBPP+
|
|
metrics:
|
|
- name: pass@1
|
|
type: pass@1
|
|
value: 45.1
|
|
verified: false
|
|
- task:
|
|
type: text-generation
|
|
dataset:
|
|
type: bigcode/humanevalpack
|
|
name: HumanEvalSynthesis(Python)
|
|
metrics:
|
|
- name: pass@1
|
|
type: pass@1
|
|
value: 36.6
|
|
verified: false
|
|
- task:
|
|
type: text-generation
|
|
dataset:
|
|
type: bigcode/humanevalpack
|
|
name: HumanEvalSynthesis(JavaScript)
|
|
metrics:
|
|
- name: pass@1
|
|
type: pass@1
|
|
value: 37.2
|
|
verified: false
|
|
- task:
|
|
type: text-generation
|
|
dataset:
|
|
type: bigcode/humanevalpack
|
|
name: HumanEvalSynthesis(Java)
|
|
metrics:
|
|
- name: pass@1
|
|
type: pass@1
|
|
value: 40.9
|
|
verified: false
|
|
- task:
|
|
type: text-generation
|
|
dataset:
|
|
type: bigcode/humanevalpack
|
|
name: HumanEvalSynthesis(Go)
|
|
metrics:
|
|
- name: pass@1
|
|
type: pass@1
|
|
value: 26.2
|
|
verified: false
|
|
- task:
|
|
type: text-generation
|
|
dataset:
|
|
type: bigcode/humanevalpack
|
|
name: HumanEvalSynthesis(C++)
|
|
metrics:
|
|
- name: pass@1
|
|
type: pass@1
|
|
value: 35.4
|
|
verified: false
|
|
- task:
|
|
type: text-generation
|
|
dataset:
|
|
type: bigcode/humanevalpack
|
|
name: HumanEvalSynthesis(Rust)
|
|
metrics:
|
|
- name: pass@1
|
|
type: pass@1
|
|
value: 22.0
|
|
verified: false
|
|
- task:
|
|
type: text-generation
|
|
dataset:
|
|
type: bigcode/humanevalpack
|
|
name: HumanEvalExplain(Python)
|
|
metrics:
|
|
- name: pass@1
|
|
type: pass@1
|
|
value: 25.0
|
|
verified: false
|
|
- task:
|
|
type: text-generation
|
|
dataset:
|
|
type: bigcode/humanevalpack
|
|
name: HumanEvalExplain(JavaScript)
|
|
metrics:
|
|
- name: pass@1
|
|
type: pass@1
|
|
value: 18.9
|
|
verified: false
|
|
- task:
|
|
type: text-generation
|
|
dataset:
|
|
type: bigcode/humanevalpack
|
|
name: HumanEvalExplain(Java)
|
|
metrics:
|
|
- name: pass@1
|
|
type: pass@1
|
|
value: 29.9
|
|
verified: false
|
|
- task:
|
|
type: text-generation
|
|
dataset:
|
|
type: bigcode/humanevalpack
|
|
name: HumanEvalExplain(Go)
|
|
metrics:
|
|
- name: pass@1
|
|
type: pass@1
|
|
value: 17.1
|
|
verified: false
|
|
- task:
|
|
type: text-generation
|
|
dataset:
|
|
type: bigcode/humanevalpack
|
|
name: HumanEvalExplain(C++)
|
|
metrics:
|
|
- name: pass@1
|
|
type: pass@1
|
|
value: 26.8
|
|
verified: false
|
|
- task:
|
|
type: text-generation
|
|
dataset:
|
|
type: bigcode/humanevalpack
|
|
name: HumanEvalExplain(Rust)
|
|
metrics:
|
|
- name: pass@1
|
|
type: pass@1
|
|
value: 14.0
|
|
verified: false
|
|
- task:
|
|
type: text-generation
|
|
dataset:
|
|
type: bigcode/humanevalpack
|
|
name: HumanEvalFix(Python)
|
|
metrics:
|
|
- name: pass@1
|
|
type: pass@1
|
|
value: 18.3
|
|
verified: false
|
|
- task:
|
|
type: text-generation
|
|
dataset:
|
|
type: bigcode/humanevalpack
|
|
name: HumanEvalFix(JavaScript)
|
|
metrics:
|
|
- name: pass@1
|
|
type: pass@1
|
|
value: 23.2
|
|
verified: false
|
|
- task:
|
|
type: text-generation
|
|
dataset:
|
|
type: bigcode/humanevalpack
|
|
name: HumanEvalFix(Java)
|
|
metrics:
|
|
- name: pass@1
|
|
type: pass@1
|
|
value: 29.9
|
|
verified: false
|
|
- task:
|
|
type: text-generation
|
|
dataset:
|
|
type: bigcode/humanevalpack
|
|
name: HumanEvalFix(Go)
|
|
metrics:
|
|
- name: pass@1
|
|
type: pass@1
|
|
value: 24.4
|
|
verified: false
|
|
- task:
|
|
type: text-generation
|
|
dataset:
|
|
type: bigcode/humanevalpack
|
|
name: HumanEvalFix(C++)
|
|
metrics:
|
|
- name: pass@1
|
|
type: pass@1
|
|
value: 16.5
|
|
verified: false
|
|
- task:
|
|
type: text-generation
|
|
dataset:
|
|
type: bigcode/humanevalpack
|
|
name: HumanEvalFix(Rust)
|
|
metrics:
|
|
- name: pass@1
|
|
type: pass@1
|
|
value: 3.7
|
|
verified: false
|
|
---
|
|
|
|
![image/png](https://cdn-uploads.huggingface.co/production/uploads/62cd5057674cdb524450093d/1hzxoPwqkBJXshKVVe6_9.png)
|
|
|
|
# ibm-granite/granite-3b-code-base-GGUF
|
|
This is the Q4_K_M converted version of the original [`ibm-granite/granite-3b-code-base`](https://huggingface.co/ibm-granite/granite-3b-code-base).
|
|
Refer to the [original model card](https://huggingface.co/ibm-granite/granite-3b-code-base) for more details.
|
|
|
|
## Use with llama.cpp
|
|
```shell
|
|
git clone https://github.com/ggerganov/llama.cpp
|
|
cd llama.cpp
|
|
|
|
# install
|
|
make
|
|
|
|
# run generation
|
|
./main -m granite-3b-code-base-GGUF/granite-3b-code-base.Q4_K_M.gguf -n 128 -p "def generate_random(x: int):" --color
|
|
```
|
|
|