# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import importlib.metadata as importlib_metadata
import platform
from functools import lru_cache

import packaging.version
import torch


@lru_cache
def is_bnb_available() -> bool:
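    """Return True if the bitsandbytes package is installed."""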
    return importlib.util.find_spec("bitsandbytes") is not None


@lru_cache
def is_bnb_4bit_available() -> bool:
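    """Return True if bitsandbytes is installed and provides `Linear4bit` (4-bit quantization support)."""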
    if not is_bnb_available():
        return False

    import bitsandbytes as bnb

    return hasattr(bnb.nn, "Linear4bit")


@lru_cache
def is_auto_gptq_available():
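    """Return True if a compatible auto-gptq (>= 0.5.0) is installed, False if it is missing.

    Raises ImportError if an incompatible version is found.
    """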
    if importlib.util.find_spec("auto_gptq") is not None:
        AUTOGPTQ_MINIMUM_VERSION = packaging.version.parse("0.5.0")
        version_autogptq = packaging.version.parse(importlib_metadata.version("auto_gptq"))
        if AUTOGPTQ_MINIMUM_VERSION <= version_autogptq:
            return True
        else:
            raise ImportError(
                f"Found an incompatible version of auto-gptq: {version_autogptq}. "
                f"Only version {AUTOGPTQ_MINIMUM_VERSION} or higher is supported."
            )
    return False


@lru_cache
def is_gptqmodel_available():
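    """Return True if compatible versions of gptqmodel (>= 2.0.0) and optimum (>= 1.24.0) are installed.

    Returns False if gptqmodel is missing; raises ImportError if an incompatible version is found.
    """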
    if importlib.util.find_spec("gptqmodel") is not None:
        GPTQMODEL_MINIMUM_VERSION = packaging.version.parse("2.0.0")
        OPTIMUM_MINIMUM_VERSION = packaging.version.parse("1.24.0")
        version_gptqmodel = packaging.version.parse(importlib_metadata.version("gptqmodel"))
        if GPTQMODEL_MINIMUM_VERSION <= version_gptqmodel:
            if is_optimum_available():
                version_optimum = packaging.version.parse(importlib_metadata.version("optimum"))
                if OPTIMUM_MINIMUM_VERSION <= version_optimum:
                    return True
                else:
                    raise ImportError(
                        f"gptqmodel requires optimum version {OPTIMUM_MINIMUM_VERSION} or higher, "
                        f"but found version {version_optimum}."
                    )
            else:
                raise ImportError(
                    f"gptqmodel requires optimum version {OPTIMUM_MINIMUM_VERSION} or higher to be installed."
                )
        else:
            raise ImportError(
                f"Found an incompatible version of gptqmodel: {version_gptqmodel}. "
                f"Only version {GPTQMODEL_MINIMUM_VERSION} or higher is supported."
            )
    return False


@lru_cache
def is_optimum_available() -> bool:
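    """Return True if the optimum package is installed."""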
    return importlib.util.find_spec("optimum") is not None


@lru_cache
def is_torch_tpu_available(check_device=True):
    "Checks if `torch_xla` is installed and potentially if a TPU is in the environment"
    if importlib.util.find_spec("torch_xla") is not None:
        if check_device:
            # Check that an `xla_device` can be acquired; this raises a RuntimeError if no TPU is available
            try:
                import torch_xla.core.xla_model as xm

                _ = xm.xla_device()
                return True
            except RuntimeError:
                return False
        return True
    return False


@lru_cache
def is_aqlm_available():
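    """Return True if the aqlm package is installed."""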
    return importlib.util.find_spec("aqlm") is not None


@lru_cache
def is_auto_awq_available():
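    """Return True if the `awq` package (AutoAWQ) is installed."""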
    return importlib.util.find_spec("awq") is not None


@lru_cache
def is_eetq_available():
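    """Return True if the eetq package is installed."""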
    return importlib.util.find_spec("eetq") is not None


@lru_cache
def is_hqq_available():
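    """Return True if the hqq package is installed."""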
    return importlib.util.find_spec("hqq") is not None


@lru_cache
def is_inc_available():
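    """Return True if Intel Neural Compressor (`neural_compressor`) is installed."""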
    return importlib.util.find_spec("neural_compressor") is not None


@lru_cache
def is_torchao_available():
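    """Return True if a compatible torchao (>= 0.4.0) is installed, False if it is missing.

    Raises ImportError if an older, unsupported version is found.
    """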
    if importlib.util.find_spec("torchao") is None:
        return False

    TORCHAO_MINIMUM_VERSION = packaging.version.parse("0.4.0")
    try:
        torchao_version = packaging.version.parse(importlib_metadata.version("torchao"))
    except importlib_metadata.PackageNotFoundError:
        # Same idea as in diffusers:
        # https://github.com/huggingface/diffusers/blob/9f06a0d1a4a998ac6a463c5be728c892f95320a8/src/diffusers/utils/import_utils.py#L351-L357
        # It's not clear under what circumstances `importlib_metadata.version("torchao")` can raise an error even
        # though `importlib.util.find_spec("torchao") is not None` but it has been observed, so adding this for
        # precaution.
        return False

    if torchao_version < TORCHAO_MINIMUM_VERSION:
        raise ImportError(
            f"Found an incompatible version of torchao: {torchao_version}. "
            f"Only version {TORCHAO_MINIMUM_VERSION} or higher is supported."
        )
    return True


@lru_cache
def is_xpu_available(check_device=False):
    """
    Checks if XPU acceleration is available and, if `check_device` is True, whether an XPU device is present.
    """

    system = platform.system()
    if system == "Darwin":
        return False
    else:
        if check_device:
            try:
                # Will raise a RuntimeError if no XPU is found
                _ = torch.xpu.device_count()
                return torch.xpu.is_available()
            except RuntimeError:
                return False
        return hasattr(torch, "xpu") and torch.xpu.is_available()


@lru_cache
def is_diffusers_available():
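    """Return True if the diffusers package is installed."""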
    return importlib.util.find_spec("diffusers") is not None
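

# A minimal usage sketch (not part of the original module): running this file
# directly prints which optional backends are importable in the current
# environment. The availability helpers above are real; this __main__ block is
# an illustrative addition.
if __name__ == "__main__":
    _checks = {
        "bitsandbytes": is_bnb_available,
        "bitsandbytes 4-bit": is_bnb_4bit_available,
        "auto-gptq": is_auto_gptq_available,
        "gptqmodel": is_gptqmodel_available,
        "optimum": is_optimum_available,
        "torch_xla (TPU)": is_torch_tpu_available,
        "aqlm": is_aqlm_available,
        "autoawq": is_auto_awq_available,
        "eetq": is_eetq_available,
        "hqq": is_hqq_available,
        "neural_compressor": is_inc_available,
        "torchao": is_torchao_available,
        "XPU": is_xpu_available,
        "diffusers": is_diffusers_available,
    }
    for _name, _check in _checks.items():
        try:
            print(f"{_name}: {_check()}")
        except ImportError as exc:
            # The version-gated checks raise instead of returning False on incompatible installs.
            print(f"{_name}: incompatible ({exc})")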