Yoshiii committed on
Commit
42e83e7
1 Parent(s): 09125e9

Upload 2 files

Files changed (3)
  1. .gitattributes +1 -0
  2. libbitsandbytes_cuda116.dll +3 -0
  3. main.py +412 -0
.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ libbitsandbytes_cuda116.dll filter=lfs diff=lfs merge=lfs -text
libbitsandbytes_cuda116.dll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88f7bd2916ca3effc43f88492f1e1b9088d13cb5be3b4a3a4aede6aa3bf8d412
+ size 4724224
main.py ADDED
@@ -0,0 +1,412 @@
+ """
+ extract factors the build is dependent on:
+ [X] compute capability
+     [ ] TODO: Q - What if we have multiple GPUs of different makes?
+ - CUDA version
+ - Software:
+     - CPU-only: only CPU quantization functions (no optimizer, no matrix multiplication)
+     - CuBLAS-LT: full-build 8-bit optimizer
+     - no CuBLAS-LT: no 8-bit matrix multiplication (`nomatmul`)
+
+ evaluation:
+     - if paths faulty, return meaningful error
+     - else:
+         - determine CUDA version
+         - determine capabilities
+         - based on that set the default path
+ """
+
+ import ctypes as ct
+ import os
+ import errno
+ import torch
+ from warnings import warn
+
+ from pathlib import Path
+ from typing import Set, Union
+ from .env_vars import get_potentially_lib_path_containing_env_vars
+
+ CUDA_RUNTIME_LIB: str = "libcudart.so"
+
+ class CUDASetup:
+     _instance = None
+
+     def __init__(self):
+         raise RuntimeError("Call get_instance() instead")
+
+     def generate_instructions(self):
+         if self.cuda is None:
+             self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected.')
+             self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.')
+             self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:')
+             self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null')
+             self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a')
+             self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc')
+             return
+
+         if self.cudart_path is None:
+             self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.')
+             self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable')
+             self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null')
+             self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a')
+             self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc')
+             self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.')
+             self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh')
+             self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.')
+             self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local')
+             return
+
+         make_cmd = f'CUDA_VERSION={self.cuda_version_string}'
+         if len(self.cuda_version_string) < 3:
+             make_cmd += ' make cuda92'
+         elif self.cuda_version_string == '110':
+             make_cmd += ' make cuda110'
+         elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0:
+             make_cmd += ' make cuda11x'
+         elif self.cuda_version_string == '100':
+             self.add_log_entry('CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.')
+             self.add_log_entry('CUDA SETUP: Before you try again running bitsandbytes, make sure old CUDA 10.0 versions are uninstalled and removed from $LD_LIBRARY_PATH variables.')
+             return
+
+         has_cublaslt = is_cublasLt_compatible(self.cc)
+         if not has_cublaslt:
+             make_cmd += '_nomatmul'
+
+         self.add_log_entry('CUDA SETUP: Something unexpected happened. Please compile from source:')
+         self.add_log_entry('git clone git@github.com:TimDettmers/bitsandbytes.git')
+         self.add_log_entry('cd bitsandbytes')
+         self.add_log_entry(make_cmd)
+         self.add_log_entry('python setup.py install')
+
+     def initialize(self):
+         if not getattr(self, 'initialized', False):
+             self.has_printed = False
+             self.lib = None
+             self.initialized = False
+
+     def run_cuda_setup(self):
+         self.initialized = True
+         self.cuda_setup_log = []
+
+         binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup()
+         self.cudart_path = cudart_path
+         self.cuda = cuda
+         self.cc = cc
+         self.cuda_version_string = cuda_version_string
+
+         package_dir = Path(__file__).parent.parent
+         binary_path = package_dir / binary_name
+
+         try:
+             if not binary_path.exists():
+                 self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?")
+                 legacy_binary_name = "libbitsandbytes_cpu.so"
+                 self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
+                 binary_path = package_dir / legacy_binary_name
+                 if not binary_path.exists() or torch.cuda.is_available():
+                     self.add_log_entry('')
+                     self.add_log_entry('='*48 + 'ERROR' + '='*37)
+                     self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:')
+                     self.add_log_entry('1. CUDA driver not installed')
+                     self.add_log_entry('2. CUDA not installed')
+                     self.add_log_entry('3. You have multiple conflicting CUDA libraries')
+                     self.add_log_entry('4. Required library not pre-compiled for this bitsandbytes release!')
+                     self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION`, for example, `make CUDA_VERSION=113`.')
+                     self.add_log_entry('CUDA SETUP: The CUDA version for the compile might depend on your conda install. Inspect CUDA version via `conda list | grep cuda`.')
+                     self.add_log_entry('='*80)
+                     self.add_log_entry('')
+                     self.generate_instructions()
+                     self.print_log_stack()
+                     raise Exception('CUDA SETUP: Setup Failed!')
+                 self.lib = ct.cdll.LoadLibrary(str(binary_path))
+             else:
+                 self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path}...")
+                 self.lib = ct.cdll.LoadLibrary(str(binary_path))
+         except Exception as ex:
+             self.add_log_entry(str(ex))
+             self.print_log_stack()
+
+     def add_log_entry(self, msg, is_warning=False):
+         self.cuda_setup_log.append((msg, is_warning))
+
+     def print_log_stack(self):
+         for msg, is_warning in self.cuda_setup_log:
+             if is_warning:
+                 warn(msg)
+             else:
+                 print(msg)
+
+     @classmethod
+     def get_instance(cls):
+         if cls._instance is None:
+             cls._instance = cls.__new__(cls)
+             cls._instance.initialize()
+         return cls._instance
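+
+ # Minimal usage sketch (the names below are defined in this module; typically
+ # the bitsandbytes package runs this once on import):
+ #     setup = CUDASetup.get_instance()
+ #     setup.run_cuda_setup()     # picks a binary via evaluate_cuda_setup() and loads it
+ #     setup.print_log_stack()    # replays the collected warnings/messages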
+
+
+ def is_cublasLt_compatible(cc):
+     has_cublaslt = False
+     if cc is not None:
+         cc_major, cc_minor = cc.split('.')
+         if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5):
+             CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True)
+         else:
+             has_cublaslt = True
+     return has_cublaslt
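+ # Illustrative values: cc = "7.0" (e.g. V100) -> False (slow 8-bit matmul only);
+ # cc = "7.5" (e.g. T4) or "8.6" (e.g. RTX 3060) -> True (cuBLASLt path).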
+
+ def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]:
+     return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path}
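+ # For example: extract_candidate_paths("/usr/lib:/opt/cuda/lib64")
+ # -> {Path('/usr/lib'), Path('/opt/cuda/lib64')}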
+
+
+ def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
+     existent_directories: Set[Path] = set()
+     for path in candidate_paths:
+         try:
+             if path.exists():
+                 existent_directories.add(path)
+         except OSError as exc:
+             if exc.errno != errno.ENAMETOOLONG:
+                 raise exc
+
+     non_existent_directories: Set[Path] = candidate_paths - existent_directories
+     if non_existent_directories:
+         CUDASetup.get_instance().add_log_entry("WARNING: The following directories listed in your path were found to "
+             f"be non-existent: {non_existent_directories}", is_warning=True)
+
+     return existent_directories
+
+
+ def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]:
+     return {
+         path / CUDA_RUNTIME_LIB
+         for path in candidate_paths
+         if (path / CUDA_RUNTIME_LIB).is_file()
+     }
+
+
+ def resolve_paths_list(paths_list_candidate: str) -> Set[Path]:
+     """
+     Searches a given environment variable for the CUDA runtime library,
+     i.e. `libcudart.so`.
+     """
+     return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate))
+
+
+ def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]:
+     return get_cuda_runtime_lib_paths(
+         resolve_paths_list(paths_list_candidate)
+     )
+
+
+ def warn_in_case_of_duplicates(results_paths: Set[Path]) -> None:
+     if len(results_paths) > 1:
+         warning_msg = (
+             f"Found duplicate {CUDA_RUNTIME_LIB} files: {results_paths}. "
+             "We'll flip a coin and try one of these, in order to fail forward.\n"
+             "Either way, this might cause trouble in the future:\n"
+             "If you get `CUDA error: invalid device function` errors, the above "
+             "might be the cause and the solution is to make sure only one "
+             f"{CUDA_RUNTIME_LIB} is in the paths that we search based on your env.")
+         CUDASetup.get_instance().add_log_entry(warning_msg, is_warning=True)
+
+
+ def determine_cuda_runtime_lib_path() -> Union[Path, None]:
+     """
+     Searches for a CUDA installation, in the following order of priority:
+         1. active conda env
+         2. LD_LIBRARY_PATH
+         3. any other env vars, while ignoring those that
+             - are known to be unrelated (see `bnb.cuda_setup.env_vars.to_be_ignored`)
+             - don't contain the path separator `/`
+
+     If multiple libraries are found in part 3, we optimistically try one,
+     while giving a warning message.
+     """
+     candidate_env_vars = get_potentially_lib_path_containing_env_vars()
+
+     if "CONDA_PREFIX" in candidate_env_vars:
+         conda_libs_path = Path(candidate_env_vars["CONDA_PREFIX"]) / "lib"
+
+         conda_cuda_libs = find_cuda_lib_in(str(conda_libs_path))
+         warn_in_case_of_duplicates(conda_cuda_libs)
+
+         if conda_cuda_libs:
+             return next(iter(conda_cuda_libs))
+
+         CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["CONDA_PREFIX"]} did not contain '
+             f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...', is_warning=True)
+
+     if "LD_LIBRARY_PATH" in candidate_env_vars:
+         lib_ld_cuda_libs = find_cuda_lib_in(candidate_env_vars["LD_LIBRARY_PATH"])
+         # check for duplicates before we pick one
+         warn_in_case_of_duplicates(lib_ld_cuda_libs)
+
+         if lib_ld_cuda_libs:
+             return next(iter(lib_ld_cuda_libs))
+
+         CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["LD_LIBRARY_PATH"]} did not contain '
+             f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...', is_warning=True)
+
+     remaining_candidate_env_vars = {
+         env_var: value for env_var, value in candidate_env_vars.items()
+         if env_var not in {"CONDA_PREFIX", "LD_LIBRARY_PATH"}
+     }
+
+     cuda_runtime_libs = set()
+     for env_var, value in remaining_candidate_env_vars.items():
+         cuda_runtime_libs.update(find_cuda_lib_in(value))
+
+     if len(cuda_runtime_libs) == 0:
+         CUDASetup.get_instance().add_log_entry('CUDA_SETUP: WARNING! libcudart.so not found in any environmental path. Searching /usr/local/cuda/lib64...')
+         cuda_runtime_libs.update(find_cuda_lib_in('/usr/local/cuda/lib64'))
+
+     warn_in_case_of_duplicates(cuda_runtime_libs)
+
+     return next(iter(cuda_runtime_libs)) if cuda_runtime_libs else None
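+ # Illustrative walk-through (hypothetical env): with CONDA_PREFIX=/opt/conda and
+ # /opt/conda/lib/libcudart.so present, the conda branch returns
+ # Path('/opt/conda/lib/libcudart.so') without consulting LD_LIBRARY_PATH; only
+ # if every env var misses do we fall back to /usr/local/cuda/lib64.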
+
+
+ def check_cuda_result(cuda, result_val):
+     # 3. Check for CUDA errors
+     if result_val != 0:
+         error_str = ct.c_char_p()
+         cuda.cuGetErrorString(result_val, ct.byref(error_str))
+         if error_str.value is not None:
+             CUDASetup.get_instance().add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}")
+         else:
+             CUDASetup.get_instance().add_log_entry("Unknown CUDA exception! Please check your CUDA install. It might also be that your GPU is too old.")
+
+
+ # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION
+ def get_cuda_version(cuda, cudart_path):
+     if cuda is None: return None
+
+     try:
+         cudart = ct.CDLL(cudart_path)
+     except OSError:
+         CUDASetup.get_instance().add_log_entry(f'ERROR: libcudart.so could not be read from path: {cudart_path}!')
+         return None
+
+     version = ct.c_int()
+     try:
+         check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ct.byref(version)))
+     except AttributeError as e:
+         CUDASetup.get_instance().add_log_entry(f'ERROR: {str(e)}')
+         CUDASetup.get_instance().add_log_entry(f'CUDA SETUP: libcudart.so path is {cudart_path}')
+         CUDASetup.get_instance().add_log_entry('CUDA SETUP: It seems that your cuda installation is not in your path. See https://github.com/TimDettmers/bitsandbytes/issues/85 for more information.')
+     version = int(version.value)
+     # cudaRuntimeGetVersion encodes the version as 1000*major + 10*minor,
+     # e.g. 11060 -> major 11, minor 6 -> "116"
+     major = version//1000
+     minor = (version-(major*1000))//10
+
+     if major < 11:
+         CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA versions lower than 11 are currently not supported for LLM.int8(). You will only be able to use 8-bit optimizers and quantization routines!!')
+
+     return f'{major}{minor}'
+
+
+ def get_cuda_lib_handle():
+     # 1. find libcuda.so library (GPU driver) (/usr/lib)
+     try:
+         cuda = ct.CDLL("libcuda.so")
+     except OSError:
+         CUDASetup.get_instance().add_log_entry('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!')
+         return None
+     check_cuda_result(cuda, cuda.cuInit(0))
+
+     return cuda
+
+
+ def get_compute_capabilities(cuda):
+     """
+     1. find libcuda.so library (GPU driver) (/usr/lib)
+        init_device -> init variables -> call function by reference
+     2. call extern C function to determine CC
+        (https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__DEVICE__DEPRECATED.html)
+     3. Check for CUDA errors
+        https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
+     # bits taken from https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549
+     """
+
+     nGpus = ct.c_int()
+     cc_major = ct.c_int()
+     cc_minor = ct.c_int()
+
+     device = ct.c_int()
+
+     check_cuda_result(cuda, cuda.cuDeviceGetCount(ct.byref(nGpus)))
+     ccs = []
+     for i in range(nGpus.value):
+         check_cuda_result(cuda, cuda.cuDeviceGet(ct.byref(device), i))
+         ref_major = ct.byref(cc_major)
+         ref_minor = ct.byref(cc_minor)
+         # 2. call extern C function to determine CC
+         check_cuda_result(cuda, cuda.cuDeviceComputeCapability(ref_major, ref_minor, device))
+         ccs.append(f"{cc_major.value}.{cc_minor.value}")
+
+     return ccs
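+ # Example (illustrative): a single-GPU machine with an RTX 3060 yields ['8.6'];
+ # a mixed two-GPU machine might yield ['8.6', '7.5'], in enumeration order, not sorted.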
+
+
+ # def get_compute_capability()-> Union[List[str, ...], None]: # FIXME: error
+ def get_compute_capability(cuda):
+     """
+     Extracts the highest compute capability from all available GPUs, as compute
+     capabilities are downwards compatible. If no GPUs are detected, it returns
+     None.
+     """
+     if cuda is None: return None
+
+     # TODO: handle different compute capabilities; for now, take the max
+     ccs = get_compute_capabilities(cuda)
+     if ccs:
+         # sort numerically so ccs[-1] really is the highest capability
+         ccs.sort(key=lambda v: tuple(map(int, v.split('.'))))
+         return ccs[-1]
+
+
+ def evaluate_cuda_setup():
+     if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
+         print('')
+         print('='*35 + 'BUG REPORT' + '='*35)
+         print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues')
+         print('='*80)
+     # Windows build: short-circuit to the bundled CUDA 11.6 DLL when a GPU is visible
+     if torch.cuda.is_available(): return 'libbitsandbytes_cuda116.dll', None, None, None, None
+
+     cuda_setup = CUDASetup.get_instance()
+     cudart_path = determine_cuda_runtime_lib_path()
+     cuda = get_cuda_lib_handle()
+     cc = get_compute_capability(cuda)
+     cuda_version_string = get_cuda_version(cuda, cudart_path)
+
+     failure = False
+     if cudart_path is None:
+         failure = True
+         cuda_setup.add_log_entry("WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!", is_warning=True)
+     else:
+         cuda_setup.add_log_entry(f"CUDA SETUP: CUDA runtime path found: {cudart_path}")
+
+     if cc == '' or cc is None:
+         failure = True
+         cuda_setup.add_log_entry("WARNING: No GPU detected! Check your CUDA paths. Proceeding to load CPU-only library...", is_warning=True)
+     else:
+         cuda_setup.add_log_entry(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}")
+
+     if cuda is None:
+         failure = True
+     else:
+         cuda_setup.add_log_entry(f'CUDA SETUP: Detected CUDA version {cuda_version_string}')
+
+     # 7.5 is the minimum CC for cuBLASLt
+     has_cublaslt = is_cublasLt_compatible(cc)
+
+     # TODO:
+     # (1) CUDA missing cases (no CUDA installed but CUDA driver present (nvidia-smi accessible))
+     # (2) Multiple CUDA versions installed
+
+     # we use ls -l instead of nvcc to determine the cuda version
+     # since most installations will have the libcudart.so installed, but not the compiler
+
+     if failure:
+         binary_name = "libbitsandbytes_cpu.so"
+     elif has_cublaslt:
+         binary_name = f"libbitsandbytes_cuda{cuda_version_string}.so"
+     else:
+         # if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so
+         binary_name = f"libbitsandbytes_cuda{cuda_version_string}_nocublaslt.so"
+
+     return binary_name, cudart_path, cuda, cc, cuda_version_string
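+
+ # Quick smoke test (hypothetical invocation; assumes this file is installed as
+ # bitsandbytes/cuda_setup/main.py so the relative import above resolves):
+ #     python -c "from bitsandbytes.cuda_setup.main import evaluate_cuda_setup; print(evaluate_cuda_setup())"
+ # On this Windows build with a working GPU it should print
+ # ('libbitsandbytes_cuda116.dll', None, None, None, None).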