Text Generation
Transformers
PyTorch
English
mixtral
conversational
Inference Endpoints
text-generation-inference
File size: 1,406 Bytes
56e15b8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
#!/bin/bash
#
# Run lm-evaluation-harness benchmarks (vLLM backend) against a local
# Mixtral checkpoint, then aggregate the per-task JSON results and upload
# them to the Hugging Face Hub.
#
# Requires: lm_eval, jq, huggingface-cli (logged in), 4 GPUs for TP=4.
set -euo pipefail

MODEL_PATH="/workspace/text-generation-webui/models/dolphin-mixtral"
MODEL_NAME="dolphin-2.7-mixtral-8x7b"
RESULTS_PATH="/workspace/results/$MODEL_NAME"
mkdir -p "$RESULTS_PATH"

# vLLM engine options passed through lm_eval's --model_args.
# trust_remote_code=True — required by some tokenizer/model repos; review before use.
PRETRAINED_ARGS="$MODEL_PATH,tensor_parallel_size=4,dtype=auto,trust_remote_code=True,gpu_memory_utilization=0.8"
MODEL_ARGS="pretrained=$PRETRAINED_ARGS"

# Benchmark suite: the Open-LLM-Leaderboard-style task set.
tasks=(
  mmlu
  truthfulqa
  gsm8k
  hellaswag
  arc_challenge
  winogrande
)

#######################################
# Map a benchmark task name to its conventional few-shot count.
# Arguments: $1 - task name (e.g. "mmlu")
# Outputs:   the few-shot count on stdout (0 for unknown tasks)
#######################################
get_num_fewshot() {
  local task=$1
  case "$task" in
    mmlu | gsm8k | winogrande) echo 5 ;;
    truthfulqa)                echo 0 ;;
    hellaswag)                 echo 10 ;;
    arc_challenge)             echo 25 ;;
    *)                         echo 0 ;;
  esac
}

# Run each benchmark. Build the command once in an array so the logged
# command and the executed command can never drift apart (the original
# repeated the full line for echo and execution).
for TASK in "${tasks[@]}"; do
    cmd=(
        lm_eval --model vllm
        --model_args "$MODEL_ARGS"
        --task="$TASK"
        --num_fewshot "$(get_num_fewshot "$TASK")"
        --batch_size 8
        --output_path "$RESULTS_PATH/$TASK.json"
    )
    printf '%q ' "${cmd[@]}"; printf '\n'   # shell-quoted, copy-pasteable log line
    # Keep going if one task fails, but say so instead of failing silently.
    "${cmd[@]}" || printf 'warning: task %s failed; continuing\n' "$TASK" >&2
done


jq -s '[.[]]' $RESULTS_PATH/*.json > $RESULTS_PATH/eval_results.json

huggingface-cli upload cognitivecomputations/$MODEL_NAME $RESULTS_PATH/eval_results.json
huggingface-cli upload cognitivecomputations/$MODEL_NAME eval.sh