import importlib
import importlib.metadata as importlib_metadata
import importlib.util
from functools import lru_cache

import packaging.version
|
|
def is_bnb_available() -> bool:
    return importlib.util.find_spec("bitsandbytes") is not None
|
|
def is_bnb_4bit_available() -> bool:
    if not is_bnb_available():
        return False

    import bitsandbytes as bnb

    return hasattr(bnb.nn, "Linear4bit")
|
|
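# Hedged usage sketch, not part of the original module: callers typically
# branch on the checks above instead of importing optional backends at module
# scope. `_example_linear_cls` is a hypothetical helper shown only for
# illustration; it assumes torch is installed and relies on bitsandbytes
# exposing `bnb.nn.Linear4bit` whenever is_bnb_4bit_available() returns True.
def _example_linear_cls():
    if is_bnb_4bit_available():
        import bitsandbytes as bnb

        return bnb.nn.Linear4bit

    import torch.nn as nn

    return nn.Linear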
|
|
def is_auto_gptq_available() -> bool:
    if importlib.util.find_spec("auto_gptq") is not None:
        AUTOGPTQ_MINIMUM_VERSION = packaging.version.parse("0.5.0")
        version_autogptq = packaging.version.parse(importlib_metadata.version("auto_gptq"))
        if AUTOGPTQ_MINIMUM_VERSION <= version_autogptq:
            return True
        else:
            raise ImportError(
                f"Found an incompatible version of auto-gptq: {version_autogptq}. "
                f"Only versions >= {AUTOGPTQ_MINIMUM_VERSION} are supported."
            )
    return False
|
|
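# Hedged usage sketch (illustrative only): unlike the other helpers,
# is_auto_gptq_available can raise rather than return False, so callers that
# only want a yes/no answer may need to swallow the ImportError. The wrapper
# below is a hypothetical example, not part of the original module.
def _example_auto_gptq_usable() -> bool:
    try:
        return is_auto_gptq_available()
    except ImportError:
        # An auto-gptq older than the supported minimum is installed;
        # treat it as unavailable for this caller.
        return False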
|
|
def is_optimum_available() -> bool:
    return importlib.util.find_spec("optimum") is not None
|
|
@lru_cache
def is_torch_tpu_available(check_device: bool = True) -> bool:
    """Check whether `torch_xla` is installed and, if check_device is True,
    whether an XLA/TPU device is actually reachable."""
    if importlib.util.find_spec("torch_xla") is not None:
        if check_device:
            # xm.xla_device() raises a RuntimeError when no XLA device
            # (e.g. a TPU) is reachable in this environment.
            try:
                import torch_xla.core.xla_model as xm

                _ = xm.xla_device()
                return True
            except RuntimeError:
                return False
        return True
    return False
|
|
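# Hedged note with a small sketch (not from the original module): because
# is_torch_tpu_available is wrapped in lru_cache, the device probe runs at
# most once per distinct check_device value for the life of the process;
# passing check_device=False suffices when an import check alone is enough.
def _example_tpu_backend() -> str:
    # Hypothetical helper: choose a backend label without probing hardware.
    return "xla" if is_torch_tpu_available(check_device=False) else "default"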
|
|
def is_aqlm_available() -> bool:
    return importlib.util.find_spec("aqlm") is not None
|
|
def is_auto_awq_available() -> bool:
    return importlib.util.find_spec("awq") is not None
|
|
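# Hedged diagnostic sketch, not part of the original module: print which
# optional backends the current environment exposes, using only the helpers
# defined above. is_auto_gptq_available is wrapped because it may raise.
if __name__ == "__main__":
    def _safe(check):
        try:
            return check()
        except ImportError:
            return False

    for name, check in [
        ("bitsandbytes", is_bnb_available),
        ("bitsandbytes 4-bit", is_bnb_4bit_available),
        ("auto-gptq", is_auto_gptq_available),
        ("optimum", is_optimum_available),
        ("torch_xla", is_torch_tpu_available),
        ("aqlm", is_aqlm_available),
        ("autoawq", is_auto_awq_available),
    ]:
        print(f"{name}: {'available' if _safe(check) else 'unavailable'}")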