 def is_cuda_available():
     try:
+        os.system("pip install torch")
         import torch
 
         return torch.cuda.is_available()
@@ -111,6 +112,15 @@ def detect_local_sm_architectures():
     return arch_list
 
 
+def detect_hardware():
+    if is_hpu_available():
+        return "requirements-hpu.txt"
+    elif is_cuda_available():
+        return "requirements.txt"
+    else:
+        return "requirements-cpu.txt"
+
+
 UNSUPPORTED_COMPUTE_CAPABILITIES = ['3.5', '3.7', '5.0', '5.2', '5.3']
 
 if BUILD_CUDA_EXT:
@@ -219,11 +229,7 @@ def detect_local_sm_architectures():
             "auto_round_extension.*",
         ],
     ),
-    "install_requires": fetch_requirements("requirements.txt"),
-    "extras_require": {
-        "hpu": fetch_requirements("requirements-hpu.txt"),
-        "cpu": fetch_requirements("requirements-cpu.txt"),
-    },
+    "install_requires": fetch_requirements(detect_hardware()),
 }
 
 if __name__ == "__main__":
@@ -248,7 +254,6 @@ def detect_local_sm_architectures():
         url="https://github.com/intel/auto-round",
         packages=include_packages,
         include_dirs=include_dirs,
-        ##include_package_data=False,
         install_requires=install_requires,
         extras_require=extras_require,
         python_requires=">=3.7.0",
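
Note: a minimal sketch of how the dependency selection introduced above plays out when setup.py runs. detect_hardware() is taken verbatim from the diff; is_hpu_available() and fetch_requirements() are defined elsewhere in setup.py and are not shown here, so the bodies below are assumed stand-ins rather than the repository's actual implementations.

import importlib.util

def is_hpu_available():
    # Assumed stand-in: treat an importable Habana torch plugin as "HPU present".
    return importlib.util.find_spec("habana_frameworks") is not None

def fetch_requirements(path):
    # Assumed stand-in: one requirement per non-empty, non-comment line of the chosen file.
    with open(path) as f:
        return [line.strip() for line in f if line.strip() and not line.startswith("#")]

With the diff applied, setup() receives a single hardware-specific dependency list via install_requires=fetch_requirements(detect_hardware()), where previously the HPU and CPU variants were exposed through extras_require.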