"""Module containing performance results for the Virtuoso-Medium model."""
results_virtuoso_medium = {
    "name": "Virtuoso-Medium",
    "modelType": "Qwen2.5 32B",
    "configurations": [
        {
            "instanceType": "r8g.4xlarge",
            "quantization": "Q4_0_4_8",
            "container": "llama.cpp 11/27/24",
            "status": "OK",
            "tokensPerSecond": "10.5",
            "notes": "-fa",
        },
        {
            "instanceType": "g5.12xlarge",
            "quantization": "none",
            "container": "LMI 0.30+vLLM 0.6.2",
            "status": "OK",
            "tokensPerSecond": "23",
            "notes": '"OPTION_MAX_MODEL_LEN": "16384",\n"TENSOR_PARALLEL_DEGREE": "max",',
        },
        {
            "instanceType": "g6.12xlarge",
            "quantization": "none",
            "container": "LMI 0.30+vLLM 0.6.2",
            "status": "OK",
            "tokensPerSecond": "14",
            "notes": '"OPTION_MAX_MODEL_LEN": "16384",\n"TENSOR_PARALLEL_DEGREE": "max",',
        },
        {
            "instanceType": "g6e.12xlarge",
            "quantization": "none",
            "container": "vLLM 0.6.4.post1",
            "status": "OK",
            "tokensPerSecond": "36",
            "notes": "--tensor-parallel-size 4 --max-model-len 16384",
        },
        {
            "instanceType": "g6e.12xlarge (2 GPUs)",
            "quantization": "none",
            "container": "vLLM 0.6.4.post1",
            "status": "OK",
            "tokensPerSecond": "21",
            "notes": "--tensor-parallel-size 2 --max-model-len 16384",
        },
        {
            "instanceType": "g6e.48xlarge",
            "quantization": "none",
            "container": "LMI 0.30+vLLM 0.6.2",
            "status": "OK",
            "tokensPerSecond": "48",
            "notes": "",
        },
        {
            "instanceType": "p4d.24xlarge",
            "quantization": "none",
            "container": "LMI 0.30+vLLM 0.6.2",
            "status": "OK",
            "tokensPerSecond": "72.5",
            "notes": '"OPTION_MAX_MODEL_LEN": "32768",\n"TENSOR_PARALLEL_DEGREE": "max",',
        },
        {
            "instanceType": "p5.48xlarge",
            "quantization": "none",
            "container": "vLLM 0.6.4.post1",
            "status": "OK",
            "tokensPerSecond": "117",
            "notes": "--tensor-parallel-size 8",
        },
        {
            "instanceType": "p5.48xlarge (4 GPUs)",
            "quantization": "none",
            "container": "vLLM 0.6.4.post1",
            "status": "OK",
            "tokensPerSecond": "88",
            "notes": "--tensor-parallel-size 4",
        },
        {
            "instanceType": "p5.48xlarge (2 GPUs)",
            "quantization": "none",
            "container": "vLLM 0.6.4.post1",
            "status": "OK",
            "tokensPerSecond": "58",
            "notes": "--tensor-parallel-size 2",
        },
        {
            "instanceType": "p5.48xlarge (1 GPU)",
            "quantization": "none",
            "container": "vLLM 0.6.4.post1",
            "status": "OK",
            "tokensPerSecond": "38",
            "notes": "--tensor-parallel-size 1",
        },
    ],
}
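

# Illustrative usage only, not part of the benchmark data: a minimal sketch of
# how these results might be ranked by throughput. The helper name
# `fastest_configurations` is hypothetical; it assumes `tokensPerSecond` values
# remain numeric strings as recorded above.
def fastest_configurations(results, top_n=3):
    """Return the top_n configurations sorted by tokens per second, descending."""
    return sorted(
        results["configurations"],
        key=lambda cfg: float(cfg["tokensPerSecond"]),
        reverse=True,
    )[:top_n]


if __name__ == "__main__":
    # Example: print the three fastest configurations for this model.
    for cfg in fastest_configurations(results_virtuoso_medium):
        print(f'{cfg["instanceType"]}: {cfg["tokensPerSecond"]} tokens/s ({cfg["container"]})')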