setup.py
import re
from io import open
import os
from setuptools import find_packages, setup
import sys
from functools import lru_cache
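
# Force g++ as the C/C++ compiler for anything compiled during the build.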
os.environ["CC"] = "g++"
os.environ["CXX"] = "g++"

try:
    filepath = "./auto_round/version.py"
    with open(filepath) as version_file:
        (__version__,) = re.findall('__version__ = "(.*)"', version_file.read())
except Exception as error:
    assert False, "Error: Could not open '%s' due to %s\n" % (filepath, error)
version = __version__

# All BUILD_* flags are initially set to `False` and
# will be updated to `True` if the corresponding environment check passes.
PYPI_RELEASE = os.environ.get("PYPI_RELEASE", None)
BUILD_HPU_ONLY = os.environ.get("BUILD_HPU_ONLY", "0") == "1"


@lru_cache(None)
def is_habana_framework_installed():
    """Check if Habana framework is installed.

    Only check for the habana_frameworks package without importing it to avoid
    initializing lazy-mode-related components.
    """
    from importlib.util import find_spec

    package_spec = find_spec("habana_frameworks")
    return package_spec is not None


@lru_cache(None)
def is_hpu_available():
    try:
        import habana_frameworks.torch.core as htcore  # pylint: disable=E0401

        return True
    except ImportError:
        return False


if is_hpu_available() or is_habana_framework_installed():
    # When HPU is available, build the HPU-only package by default.
    BUILD_HPU_ONLY = True


def is_cpu_env():
    """Return True if neither CUDA nor HPU is available, i.e. a CPU-only environment."""
    try:
        import torch
    except Exception as e:
        print(
            f"Building the extension requires PyTorch to be installed; please install PyTorch first: {e}.\n"
            "NOTE: This issue may be raised by pip's build isolation (which ignores local packages). "
            "Please use `--no-build-isolation` when installing with pip, and refer to "
            "https://github.com/intel/auto-round for more details."
        )
        sys.exit(1)
    if torch.cuda.is_available():
        return False
    try:
        import habana_frameworks.torch.core as htcore  # pylint: disable=E0401

        return False
    except ImportError:
        return True


def fetch_requirements(path):
    """Read a requirements file and return its stripped lines as a list."""
    with open(path, "r") as fd:
        return [r.strip() for r in fd.readlines()]
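

# Default (full) package configuration: base requirements plus optional
# "gpu" and "cpu" extras.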
PKG_INSTALL_CFG = {
    "include_packages": find_packages(
        include=[
            "auto_round",
            "auto_round.*",
            "auto_round_extension",
            "auto_round_extension.*",
        ],
    ),
    "install_requires": fetch_requirements("requirements.txt"),
    "extras_require": {
        "gpu": fetch_requirements("requirements-gpu.txt"),
        "cpu": fetch_requirements("requirements-cpu.txt"),
    },
}


###############################################################################
# Configuration for auto_round_lib
# From pip:
#     pip install auto-round-lib
# From source:
#     python setup.py lib install
###############################################################################
LIB_REQUIREMENTS_FILE = "requirements-lib.txt"
LIB_INSTALL_CFG = {
    "include_packages": find_packages(
        include=[
            "auto_round",
            "auto_round.*",
            "auto_round_extension",
            "auto_round_extension.*",
        ],
    ),
    "install_requires": fetch_requirements(LIB_REQUIREMENTS_FILE),
}


if __name__ == "__main__":
    # There are two ways to install the HPU-only package:
    #   1. python setup.py lib install
    #   2. Within the Gaudi docker, where HPU is available, auto_round_lib is installed by default.
    is_user_requesting_library_build = "lib" in sys.argv
    if is_user_requesting_library_build:
        sys.argv.remove("lib")
    should_build_library = is_user_requesting_library_build or BUILD_HPU_ONLY

    if should_build_library:
        package_name = "auto_round_lib"
        INSTALL_CFG = LIB_INSTALL_CFG
    else:
        package_name = "auto_round"
        INSTALL_CFG = PKG_INSTALL_CFG

    include_packages = INSTALL_CFG.get("include_packages", {})
    install_requires = INSTALL_CFG.get("install_requires", [])
    extras_require = INSTALL_CFG.get("extras_require", {})

    setup(
        name=package_name,
        author="Intel AIPT Team",
        version=version,
        description="Repository of AutoRound: Advanced Weight-Only Quantization Algorithm for LLMs",
        long_description=open("README.md", "r", encoding="utf-8").read(),
        long_description_content_type="text/markdown",
        keywords="quantization,auto-round,LLM,SignRound",
        license="Apache 2.0",
        url="https://github.com/intel/auto-round",
        packages=include_packages,
        install_requires=install_requires,
        extras_require=extras_require,
        python_requires=">=3.7.0",
        classifiers=[
            "Intended Audience :: Science/Research",
            "Programming Language :: Python :: 3",
            "Topic :: Scientific/Engineering :: Artificial Intelligence",
            "License :: OSI Approved :: Apache Software License",
        ],
        include_package_data=True,
        package_data={"": ["mllm/templates/*.json"]},
    )
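

# Example usage, assuming a source checkout of this repository:
#   pip install .                    # full auto_round package
#   pip install ".[gpu]"             # with the GPU requirements extra
#   python setup.py lib install      # HPU-only auto_round_lib package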