import sys
sys.path.append('/home/nvidia/newdisk/hkpc/software')
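# NOTE: the path appended above is a machine-specific local addition and is not
# part of the upstream torchvision setup.py.
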
import distutils.command.clean
import distutils.spawn
import glob
import os
import shutil
import subprocess
import sys

import torch
from pkg_resources import DistributionNotFound, get_distribution, parse_version
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDA_HOME, CUDAExtension


def read(*names, **kwargs):
    with open(os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8")) as fp:
        return fp.read()


def get_dist(pkgname):
    try:
        return get_distribution(pkgname)
    except DistributionNotFound:
        return None
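

# The version string starts from version.txt; BUILD_VERSION overrides it,
# otherwise the short git SHA is appended when available.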
cwd = os.path.dirname(os.path.abspath(__file__))
version_txt = os.path.join(cwd, "version.txt")
with open(version_txt) as f:
    version = f.readline().strip()
sha = "Unknown"
package_name = "torchvision"

try:
    sha = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd).decode("ascii").strip()
except Exception:
    pass

if os.getenv("BUILD_VERSION"):
    version = os.getenv("BUILD_VERSION")
elif sha != "Unknown":
    version += "+" + sha[:7]
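

# write_version_file() generates torchvision/version.py at build time with the
# resolved version, the git SHA, and (for CUDA builds) the detected CUDA version.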
def write_version_file():
    version_path = os.path.join(cwd, "torchvision", "version.py")
    with open(version_path, "w") as f:
        f.write(f"__version__ = '{version}'\n")
        f.write(f"git_version = {repr(sha)}\n")
        f.write("from torchvision.extension import _check_cuda_version\n")
        f.write("if _check_cuda_version() > 0:\n")
        f.write("    cuda = _check_cuda_version()\n")
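

# Runtime requirements; setting PYTORCH_VERSION pins the torch dependency to an exact version.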
pytorch_dep = "torch"
if os.getenv("PYTORCH_VERSION"):
    pytorch_dep += "==" + os.getenv("PYTORCH_VERSION")

requirements = [
    "numpy",
    "requests",
    pytorch_dep,
]

# Excluding 8.3.* because of https://github.com/pytorch/vision/issues/4934
pillow_ver = " >= 5.3.0, !=8.3.*"
pillow_req = "pillow-simd" if get_dist("pillow-simd") is not None else "pillow"
requirements.append(pillow_req + pillow_ver)
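

# find_library() looks for a C header named <name>.h in TORCHVISION_INCLUDE, the
# torchvision package directory, the active conda environment, and common system
# include directories, and reports where (if anywhere) it was found.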
def find_library(name, vision_include):
    this_dir = os.path.dirname(os.path.abspath(__file__))
    build_prefix = os.environ.get("BUILD_PREFIX", None)
    is_conda_build = build_prefix is not None

    library_found = False
    conda_installed = False
    lib_folder = None
    include_folder = None
    library_header = f"{name}.h"

    # Lookup in TORCHVISION_INCLUDE or in the package file
    package_path = [os.path.join(this_dir, "torchvision")]
    for folder in vision_include + package_path:
        candidate_path = os.path.join(folder, library_header)
        library_found = os.path.exists(candidate_path)
        if library_found:
            break

    if not library_found:
        print(f"Running build on conda-build: {is_conda_build}")
        if is_conda_build:
            # Add conda headers/libraries
            if os.name == "nt":
                build_prefix = os.path.join(build_prefix, "Library")
            include_folder = os.path.join(build_prefix, "include")
            lib_folder = os.path.join(build_prefix, "lib")
            library_header_path = os.path.join(include_folder, library_header)
            library_found = os.path.isfile(library_header_path)
            conda_installed = library_found
        else:
            # Check if using Anaconda to produce wheels
            conda = shutil.which("conda")
            is_conda = conda is not None
            print(f"Running build on conda: {is_conda}")
            if is_conda:
                python_executable = sys.executable
                py_folder = os.path.dirname(python_executable)
                if os.name == "nt":
                    env_path = os.path.join(py_folder, "Library")
                else:
                    env_path = os.path.dirname(py_folder)
                lib_folder = os.path.join(env_path, "lib")
                include_folder = os.path.join(env_path, "include")
                library_header_path = os.path.join(include_folder, library_header)
                library_found = os.path.isfile(library_header_path)
                conda_installed = library_found

    if not library_found:
        if sys.platform == "linux":
            library_found = os.path.exists(f"/usr/include/{library_header}")
            library_found = library_found or os.path.exists(f"/usr/local/include/{library_header}")

    return library_found, conda_installed, include_folder, lib_folder
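

# get_extensions() assembles the list of C++/CUDA extension modules (the core
# torchvision._C ops plus the optional image, video_reader and GPU decoder
# extensions) based on what is detected on the system and on the
# TORCHVISION_USE_* environment variables.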
def get_extensions():
    this_dir = os.path.dirname(os.path.abspath(__file__))
    extensions_dir = os.path.join(this_dir, "torchvision", "csrc")

    main_file = glob.glob(os.path.join(extensions_dir, "*.cpp")) + glob.glob(
        os.path.join(extensions_dir, "ops", "*.cpp")
    )
    source_cpu = (
        glob.glob(os.path.join(extensions_dir, "ops", "autograd", "*.cpp"))
        + glob.glob(os.path.join(extensions_dir, "ops", "cpu", "*.cpp"))
        + glob.glob(os.path.join(extensions_dir, "ops", "quantized", "cpu", "*.cpp"))
    )
    source_mps = glob.glob(os.path.join(extensions_dir, "ops", "mps", "*.mm"))
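
    # Feature toggles and extra compiler flags are read from environment variables and echoed below.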
    print("Compiling extensions with following flags:")
    force_cuda = os.getenv("FORCE_CUDA", "0") == "1"
    print(f" FORCE_CUDA: {force_cuda}")
    force_mps = os.getenv("FORCE_MPS", "0") == "1"
    print(f" FORCE_MPS: {force_mps}")
    debug_mode = os.getenv("DEBUG", "0") == "1"
    print(f" DEBUG: {debug_mode}")
    use_png = os.getenv("TORCHVISION_USE_PNG", "1") == "1"
    print(f" TORCHVISION_USE_PNG: {use_png}")
    use_jpeg = os.getenv("TORCHVISION_USE_JPEG", "1") == "1"
    print(f" TORCHVISION_USE_JPEG: {use_jpeg}")
    use_nvjpeg = os.getenv("TORCHVISION_USE_NVJPEG", "1") == "1"
    print(f" TORCHVISION_USE_NVJPEG: {use_nvjpeg}")
    use_ffmpeg = os.getenv("TORCHVISION_USE_FFMPEG", "1") == "1"
    print(f" TORCHVISION_USE_FFMPEG: {use_ffmpeg}")
    use_video_codec = os.getenv("TORCHVISION_USE_VIDEO_CODEC", "1") == "1"
    print(f" TORCHVISION_USE_VIDEO_CODEC: {use_video_codec}")
    nvcc_flags = os.getenv("NVCC_FLAGS", "")
    print(f" NVCC_FLAGS: {nvcc_flags}")
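
    # On ROCm builds the CUDA sources are hipified in place and the generated
    # .hip files are compiled instead of the .cu sources.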
    is_rocm_pytorch = False
    if torch.__version__ >= "1.5":
        from torch.utils.cpp_extension import ROCM_HOME

        is_rocm_pytorch = (torch.version.hip is not None) and (ROCM_HOME is not None)

    if is_rocm_pytorch:
        from torch.utils.hipify import hipify_python

        hipify_python.hipify(
            project_directory=this_dir,
            output_directory=this_dir,
            includes="torchvision/csrc/ops/cuda/*",
            show_detailed=True,
            is_pytorch_extension=True,
        )
        source_cuda = glob.glob(os.path.join(extensions_dir, "ops", "hip", "*.hip"))
        # Copy over additional files
        for file in glob.glob(r"torchvision/csrc/ops/cuda/*.h"):
            shutil.copy(file, "torchvision/csrc/ops/hip")
    else:
        source_cuda = glob.glob(os.path.join(extensions_dir, "ops", "cuda", "*.cu"))

    source_cuda += glob.glob(os.path.join(extensions_dir, "ops", "autocast", "*.cpp"))

    sources = main_file + source_cpu
    extension = CppExtension

    define_macros = []
    extra_compile_args = {"cxx": []}
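
    # Use CUDAExtension when CUDA (or ROCm) is usable or FORCE_CUDA=1; otherwise
    # fall back to CppExtension, optionally adding the MPS sources on Apple hardware.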
    if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) or force_cuda:
        extension = CUDAExtension
        sources += source_cuda

        if not is_rocm_pytorch:
            define_macros += [("WITH_CUDA", None)]
            if nvcc_flags == "":
                nvcc_flags = []
            else:
                nvcc_flags = nvcc_flags.split(" ")
        else:
            define_macros += [("WITH_HIP", None)]
            nvcc_flags = []
        extra_compile_args["nvcc"] = nvcc_flags
    elif torch.backends.mps.is_available() or force_mps:
        sources += source_mps

    if sys.platform == "win32":
        define_macros += [("torchvision_EXPORTS", None)]
        define_macros += [("USE_PYTHON", None)]
        extra_compile_args["cxx"].append("/MP")

    if debug_mode:
        print("Compiling in debug mode")
        extra_compile_args["cxx"].append("-g")
        extra_compile_args["cxx"].append("-O0")
        if "nvcc" in extra_compile_args:
            # remove any existing "-O" and "-g" flags before appending the debug ones
            nvcc_flags = extra_compile_args["nvcc"]
            extra_compile_args["nvcc"] = [f for f in nvcc_flags if not ("-O" in f or "-g" in f)]
            extra_compile_args["nvcc"].append("-O0")
            extra_compile_args["nvcc"].append("-g")
    else:
        print("Compiling with debug mode OFF")
        extra_compile_args["cxx"].append("-g0")

    sources = [os.path.join(extensions_dir, s) for s in sources]
    include_dirs = [extensions_dir]

    ext_modules = [
        extension(
            "torchvision._C",
            sorted(sources),
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
        )
    ]

    # ------------------- Torchvision extra extensions ------------------------
    vision_include = os.environ.get("TORCHVISION_INCLUDE", None)
    vision_library = os.environ.get("TORCHVISION_LIBRARY", None)
    vision_include = vision_include.split(os.pathsep) if vision_include is not None else []
    vision_library = vision_library.split(os.pathsep) if vision_library is not None else []
    include_dirs += vision_include
    library_dirs = vision_library

    # Image reading extension
    image_macros = []
    image_include = [extensions_dir]
    image_library = []
    image_link_flags = []

    if sys.platform == "win32":
        image_macros += [("USE_PYTHON", None)]
    # Locating libPNG
    libpng = shutil.which("libpng-config")
    pngfix = shutil.which("pngfix")
    png_found = libpng is not None or pngfix is not None

    use_png = use_png and png_found
    if use_png:
        print("Found PNG library")
        if libpng is not None:
            # Linux / Mac
            min_version = "1.6.0"
            png_version = subprocess.run([libpng, "--version"], stdout=subprocess.PIPE)
            png_version = png_version.stdout.strip().decode("utf-8")
            png_version = parse_version(png_version)
            if png_version >= parse_version(min_version):
                print("Building torchvision with PNG image support")
                png_lib = subprocess.run([libpng, "--libdir"], stdout=subprocess.PIPE)
                png_lib = png_lib.stdout.strip().decode("utf-8")
                if "disabled" not in png_lib:
                    image_library += [png_lib]
                png_include = subprocess.run([libpng, "--I_opts"], stdout=subprocess.PIPE)
                png_include = png_include.stdout.strip().decode("utf-8")
                _, png_include = png_include.split("-I")
                image_include += [png_include]
                image_link_flags.append("png")
                print(f" libpng version: {png_version}")
                print(f" libpng include path: {png_include}")
            else:
                print("Could not add PNG image support to torchvision:")
                print(f" libpng minimum version {min_version}, found {png_version}")
                use_png = False
        else:
            # Windows
            png_lib = os.path.join(os.path.dirname(os.path.dirname(pngfix)), "lib")
            png_include = os.path.join(os.path.dirname(os.path.dirname(pngfix)), "include", "libpng16")
            image_library += [png_lib]
            image_include += [png_include]
            image_link_flags.append("libpng")
    else:
        print("Building torchvision without PNG image support")
    image_macros += [("PNG_FOUND", str(int(use_png)))]
    # Locating libjpeg
    (jpeg_found, jpeg_conda, jpeg_include, jpeg_lib) = find_library("jpeglib", vision_include)
    use_jpeg = use_jpeg and jpeg_found
    if use_jpeg:
        print("Building torchvision with JPEG image support")
        print(f" libjpeg include path: {jpeg_include}")
        print(f" libjpeg lib path: {jpeg_lib}")
        image_link_flags.append("jpeg")
        if jpeg_conda:
            image_library += [jpeg_lib]
            image_include += [jpeg_include]
    else:
        print("Building torchvision without JPEG image support")
    image_macros += [("JPEG_FOUND", str(int(use_jpeg)))]

    # Locating nvjpeg
    # Should be included in CUDA_HOME for CUDA >= 10.1, which is the minimum version we have in the CI
    nvjpeg_found = (
        extension is CUDAExtension
        and CUDA_HOME is not None
        and os.path.exists(os.path.join(CUDA_HOME, "include", "nvjpeg.h"))
    )
    use_nvjpeg = use_nvjpeg and nvjpeg_found
    if use_nvjpeg:
        print("Building torchvision with NVJPEG image support")
        image_link_flags.append("nvjpeg")
    else:
        print("Building torchvision without NVJPEG image support")
    image_macros += [("NVJPEG_FOUND", str(int(use_nvjpeg)))]

    image_path = os.path.join(extensions_dir, "io", "image")
    image_src = glob.glob(os.path.join(image_path, "*.cpp")) + glob.glob(os.path.join(image_path, "cpu", "*.cpp"))

    if is_rocm_pytorch:
        image_src += glob.glob(os.path.join(image_path, "hip", "*.cpp"))
        # we need to exclude this in favor of the hipified source
        image_src.remove(os.path.join(image_path, "image.cpp"))
    else:
        image_src += glob.glob(os.path.join(image_path, "cuda", "*.cpp"))
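
    # The torchvision.image extension is only built when PNG and/or JPEG support is enabled.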
    if use_png or use_jpeg:
        ext_modules.append(
            extension(
                "torchvision.image",
                image_src,
                include_dirs=image_include + include_dirs + [image_path],
                library_dirs=image_library + library_dirs,
                define_macros=image_macros,
                libraries=image_link_flags,
                extra_compile_args=extra_compile_args,
            )
        )
    # Locating ffmpeg
    ffmpeg_exe = shutil.which("ffmpeg")
    has_ffmpeg = ffmpeg_exe is not None
    ffmpeg_version = None
    # FIXME: Building torchvision with ffmpeg on MacOS or with Python 3.9
    # FIXME: causes crash. See the following GitHub issues for more details.
    # FIXME: https://github.com/pytorch/pytorch/issues/65000
    # FIXME: https://github.com/pytorch/vision/issues/3367
    if sys.platform != "linux" or (sys.version_info.major == 3 and sys.version_info.minor == 9):
        has_ffmpeg = False
    if has_ffmpeg:
        try:
            # This is to check if ffmpeg is installed properly.
            ffmpeg_version = subprocess.check_output(["ffmpeg", "-version"])
        except subprocess.CalledProcessError:
            print("Building torchvision without ffmpeg support")
            print(" Error fetching ffmpeg version, ignoring ffmpeg.")
            has_ffmpeg = False
    use_ffmpeg = use_ffmpeg and has_ffmpeg

    if use_ffmpeg:
        ffmpeg_libraries = {"libavcodec", "libavformat", "libavutil", "libswresample", "libswscale"}

        ffmpeg_bin = os.path.dirname(ffmpeg_exe)
        ffmpeg_root = os.path.dirname(ffmpeg_bin)
        ffmpeg_include_dir = os.path.join(ffmpeg_root, "include")
        ffmpeg_library_dir = os.path.join(ffmpeg_root, "lib")

        gcc = os.environ.get("CC", shutil.which("gcc"))
        platform_tag = subprocess.run([gcc, "-print-multiarch"], stdout=subprocess.PIPE)
        platform_tag = platform_tag.stdout.strip().decode("utf-8")

        if platform_tag:
            # Most probably a Debian-based distribution
            ffmpeg_include_dir = [ffmpeg_include_dir, os.path.join(ffmpeg_include_dir, platform_tag)]
            ffmpeg_library_dir = [ffmpeg_library_dir, os.path.join(ffmpeg_library_dir, platform_tag)]
        else:
            ffmpeg_include_dir = [ffmpeg_include_dir]
            ffmpeg_library_dir = [ffmpeg_library_dir]

        for library in ffmpeg_libraries:
            library_found = False
            for search_path in ffmpeg_include_dir + include_dirs:
                full_path = os.path.join(search_path, library, "*.h")
                library_found |= len(glob.glob(full_path)) > 0

            if not library_found:
                print("Building torchvision without ffmpeg support")
                print(f" {library} header files were not found, disabling ffmpeg support")
                use_ffmpeg = False
    else:
        print("Building torchvision without ffmpeg support")

    if use_ffmpeg:
        print("Building torchvision with ffmpeg support")
        print(f" ffmpeg version: {ffmpeg_version}")
        print(f" ffmpeg include path: {ffmpeg_include_dir}")
        print(f" ffmpeg library_dir: {ffmpeg_library_dir}")
        # TorchVision base decoder + video reader
        video_reader_src_dir = os.path.join(this_dir, "torchvision", "csrc", "io", "video_reader")
        video_reader_src = glob.glob(os.path.join(video_reader_src_dir, "*.cpp"))
        base_decoder_src_dir = os.path.join(this_dir, "torchvision", "csrc", "io", "decoder")
        base_decoder_src = glob.glob(os.path.join(base_decoder_src_dir, "*.cpp"))
        # Torchvision video API
        videoapi_src_dir = os.path.join(this_dir, "torchvision", "csrc", "io", "video")
        videoapi_src = glob.glob(os.path.join(videoapi_src_dir, "*.cpp"))
        # exclude tests
        base_decoder_src = [x for x in base_decoder_src if "_test.cpp" not in x]

        combined_src = video_reader_src + base_decoder_src + videoapi_src

        ext_modules.append(
            CppExtension(
                "torchvision.video_reader",
                combined_src,
                include_dirs=[
                    base_decoder_src_dir,
                    video_reader_src_dir,
                    videoapi_src_dir,
                    extensions_dir,
                    *ffmpeg_include_dir,
                    *include_dirs,
                ],
                library_dirs=ffmpeg_library_dir + library_dirs,
                libraries=[
                    "avcodec",
                    "avformat",
                    "avutil",
                    "swresample",
                    "swscale",
                ],
                extra_compile_args=["-std=c++17"] if os.name != "nt" else ["/std:c++17", "/MP"],
                extra_link_args=["-std=c++17" if os.name != "nt" else "/std:c++17"],
            )
        )
    # Locating video codec
    # CUDA_HOME should be set to the cuda root directory.
    # TORCHVISION_INCLUDE and TORCHVISION_LIBRARY should include the location to
    # video codec header files and libraries respectively.
    video_codec_found = (
        extension is CUDAExtension
        and CUDA_HOME is not None
        and any([os.path.exists(os.path.join(folder, "cuviddec.h")) for folder in vision_include])
        and any([os.path.exists(os.path.join(folder, "nvcuvid.h")) for folder in vision_include])
        and any([os.path.exists(os.path.join(folder, "libnvcuvid.so")) for folder in library_dirs])
    )

    use_video_codec = use_video_codec and video_codec_found
    if (
        use_video_codec
        and use_ffmpeg
        and any([os.path.exists(os.path.join(folder, "libavcodec", "bsf.h")) for folder in ffmpeg_include_dir])
    ):
        print("Building torchvision with video codec support")
        gpu_decoder_path = os.path.join(extensions_dir, "io", "decoder", "gpu")
        gpu_decoder_src = glob.glob(os.path.join(gpu_decoder_path, "*.cpp"))
        cuda_libs = os.path.join(CUDA_HOME, "lib64")
        cuda_inc = os.path.join(CUDA_HOME, "include")

        ext_modules.append(
            extension(
                "torchvision.Decoder",
                gpu_decoder_src,
                include_dirs=include_dirs + [gpu_decoder_path] + [cuda_inc] + ffmpeg_include_dir,
                library_dirs=ffmpeg_library_dir + library_dirs + [cuda_libs],
                libraries=[
                    "avcodec",
                    "avformat",
                    "avutil",
                    "swresample",
                    "swscale",
                    "nvcuvid",
                    "cuda",
                    "cudart",
                    "z",
                    "pthread",
                    "dl",
                    "nppicc",
                ],
                extra_compile_args=extra_compile_args,
            )
        )
    else:
        print("Building torchvision without video codec support")
        if (
            use_video_codec
            and use_ffmpeg
            and not any([os.path.exists(os.path.join(folder, "libavcodec", "bsf.h")) for folder in ffmpeg_include_dir])
        ):
            print(
                " The installed version of ffmpeg is missing the header file 'bsf.h' which is "
                " required for GPU video decoding. Please install the latest ffmpeg from conda-forge channel:"
                " `conda install -c conda-forge ffmpeg`."
            )

    return ext_modules
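

# Custom `clean` command: in addition to the standard distutils clean, remove
# any files matching the patterns listed in .gitignore.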
class clean(distutils.command.clean.clean):
    def run(self):
        with open(".gitignore") as f:
            ignores = f.read()
            for wildcard in filter(None, ignores.split("\n")):
                for filename in glob.glob(wildcard):
                    try:
                        os.remove(filename)
                    except OSError:
                        shutil.rmtree(filename, ignore_errors=True)

        # It's an old-style class in Python 2.7...
        distutils.command.clean.clean.run(self)


if __name__ == "__main__":
    print(f"Building wheel {package_name}-{version}")

    write_version_file()

    with open("README.md") as f:
        readme = f.read()

    setup(
        # Metadata
        name=package_name,
        version=version,
        author="PyTorch Core Team",
        author_email="soumith@pytorch.org",
        url="https://github.com/pytorch/vision",
        description="image and video datasets and models for torch deep learning",
        long_description=readme,
        long_description_content_type="text/markdown",
        license="BSD",
        # Package info
        packages=find_packages(exclude=("test",)),
        package_data={package_name: ["*.dll", "*.dylib", "*.so", "prototype/datasets/_builtin/*.categories"]},
        zip_safe=False,
        install_requires=requirements,
        extras_require={
            "scipy": ["scipy"],
        },
        ext_modules=get_extensions(),
        python_requires=">=3.8",
        cmdclass={
            "build_ext": BuildExtension.with_options(no_python_abi_suffix=True),
            "clean": clean,
        },
    )