{
    "name": "cuda_inference_transformers_image-classification_google/vit-base-patch16-224",
    "backend": {
        "name": "pytorch",
        "version": "2.5.1+cu124",
        "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
        "task": "image-classification",
        "library": "transformers",
        "model_type": "vit",
        "model": "google/vit-base-patch16-224",
        "processor": "google/vit-base-patch16-224",
        "device": "cuda",
        "device_ids": "0",
        "seed": 42,
        "inter_op_num_threads": null,
        "intra_op_num_threads": null,
        "model_kwargs": {},
        "processor_kwargs": {},
        "no_weights": true,
        "device_map": null,
        "torch_dtype": null,
        "eval_mode": true,
        "to_bettertransformer": false,
        "low_cpu_mem_usage": null,
        "attn_implementation": null,
        "cache_implementation": null,
        "autocast_enabled": false,
        "autocast_dtype": null,
        "torch_compile": false,
        "torch_compile_target": "forward",
        "torch_compile_config": {},
        "quantization_scheme": null,
        "quantization_config": {},
        "deepspeed_inference": false,
        "deepspeed_inference_config": {},
        "peft_type": null,
        "peft_config": {}
    },
    "scenario": {
        "name": "inference",
        "_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
        "iterations": 1,
        "duration": 1,
        "warmup_runs": 1,
        "input_shapes": {
            "batch_size": 2,
            "sequence_length": 16,
            "num_choices": 2
        },
        "new_tokens": null,
        "memory": true,
        "latency": true,
        "energy": true,
        "forward_kwargs": {},
        "generate_kwargs": {
            "max_new_tokens": 2,
            "min_new_tokens": 2
        },
        "call_kwargs": {
            "num_inference_steps": 2
        }
    },
    "launcher": {
        "name": "process",
        "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
        "device_isolation": true,
        "device_isolation_action": "error",
        "numactl": false,
        "numactl_kwargs": {},
        "start_method": "spawn"
    },
    "environment": {
        "cpu": " AMD EPYC 7R32",
        "cpu_count": 16,
        "cpu_ram_mb": 66697.248768,
        "system": "Linux",
        "machine": "x86_64",
        "platform": "Linux-5.10.228-219.884.amzn2.x86_64-x86_64-with-glibc2.35",
        "processor": "x86_64",
        "python_version": "3.10.12",
        "gpu": [
            "NVIDIA A10G"
        ],
        "gpu_count": 1,
        "gpu_vram_mb": 24146608128,
        "optimum_benchmark_version": "0.5.0.dev0",
        "optimum_benchmark_commit": null,
        "transformers_version": "4.47.0",
        "transformers_commit": null,
        "accelerate_version": "1.2.0",
        "accelerate_commit": null,
        "diffusers_version": "0.31.0",
        "diffusers_commit": null,
        "optimum_version": null,
        "optimum_commit": null,
        "timm_version": "1.0.12",
        "timm_commit": null,
        "peft_version": "0.14.0",
        "peft_commit": null
    },
    "print_report": true,
    "log_report": true
}