{
    "name": "cpu_inference_transformers_token-classification_microsoft/deberta-v3-base",
    "backend": {
        "name": "pytorch",
        "version": "2.5.1+cpu",
        "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
        "task": "token-classification",
        "library": "transformers",
        "model_type": "deberta-v2",
        "model": "microsoft/deberta-v3-base",
        "processor": "microsoft/deberta-v3-base",
        "device": "cpu",
        "device_ids": null,
        "seed": 42,
        "inter_op_num_threads": null,
        "intra_op_num_threads": null,
        "model_kwargs": {},
        "processor_kwargs": {},
        "no_weights": true,
        "device_map": null,
        "torch_dtype": null,
        "eval_mode": true,
        "to_bettertransformer": false,
        "low_cpu_mem_usage": null,
        "attn_implementation": null,
        "cache_implementation": null,
        "autocast_enabled": false,
        "autocast_dtype": null,
        "torch_compile": false,
        "torch_compile_target": "forward",
        "torch_compile_config": {},
        "quantization_scheme": null,
        "quantization_config": {},
        "deepspeed_inference": false,
        "deepspeed_inference_config": {},
        "peft_type": null,
        "peft_config": {}
    },
    "scenario": {
        "name": "inference",
        "_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
        "iterations": 1,
        "duration": 1,
        "warmup_runs": 1,
        "input_shapes": {
            "batch_size": 2,
            "sequence_length": 16,
            "num_choices": 2
        },
        "new_tokens": null,
        "memory": true,
        "latency": true,
        "energy": true,
        "forward_kwargs": {},
        "generate_kwargs": {
            "max_new_tokens": 2,
            "min_new_tokens": 2
        },
        "call_kwargs": {
            "num_inference_steps": 2
        }
    },
    "launcher": {
        "name": "process",
        "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
        "device_isolation": false,
        "device_isolation_action": null,
        "numactl": false,
        "numactl_kwargs": {},
        "start_method": "spawn"
    },
    "environment": {
        "cpu": " AMD EPYC 7763 64-Core Processor",
        "cpu_count": 4,
        "cpu_ram_mb": 16766.779392,
        "system": "Linux",
        "machine": "x86_64",
        "platform": "Linux-6.8.0-1017-azure-x86_64-with-glibc2.39",
        "processor": "x86_64",
        "python_version": "3.10.15",
        "optimum_benchmark_version": "0.5.0.dev0",
        "optimum_benchmark_commit": "fc224307c41301d024a8231662a117210adcc42c",
        "transformers_version": "4.47.0",
        "transformers_commit": null,
        "accelerate_version": "1.2.0",
        "accelerate_commit": null,
        "diffusers_version": "0.31.0",
        "diffusers_commit": null,
        "optimum_version": null,
        "optimum_commit": null,
        "timm_version": "1.0.12",
        "timm_commit": null,
        "peft_version": null,
        "peft_commit": null
    },
    "print_report": true,
    "log_report": true
}