# Unlike the rest of PyTorch, this file must be Python 2 compliant.
# This script outputs relevant system environment info.
# Run it with `python collect_env.py`.
from __future__ import print_function  # needed for print(..., file=...) in main() under Python 2

import datetime
import locale
import re
import subprocess
import sys
import os
from collections import namedtuple


try:
    import torch
    TORCH_AVAILABLE = True
except (ImportError, NameError, AttributeError, OSError):
    TORCH_AVAILABLE = False

# System Environment Information
SystemEnv = namedtuple('SystemEnv', [
    'torch_version',
    'is_debug_build',
    'cuda_compiled_version',
    'gcc_version',
    'clang_version',
    'cmake_version',
    'os',
    'libc_version',
    'python_version',
    'python_platform',
    'is_cuda_available',
    'cuda_runtime_version',
    'cuda_module_loading',
    'nvidia_driver_version',
    'nvidia_gpu_models',
    'cudnn_version',
    'pip_version',  # 'pip' or 'pip3'
    'pip_packages',
    'conda_packages',
    'hip_compiled_version',
    'hip_runtime_version',
    'miopen_runtime_version',
    'caching_allocator_config',
    'is_xnnpack_available',
    'cpu_info',
])


def run(command):
    """Returns (return-code, stdout, stderr)."""
    p = subprocess.Popen(command, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, shell=True)
    raw_output, raw_err = p.communicate()
    rc = p.returncode
    if get_platform() == 'win32':
        enc = 'oem'
    else:
        enc = locale.getpreferredencoding()
    output = raw_output.decode(enc)
    err = raw_err.decode(enc)
    return rc, output.strip(), err.strip()
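
# Example (illustrative values, not from a real machine):
#   rc, out, err = run('echo hello')   # -> (0, 'hello', '')
# A non-zero `rc` is treated as "could not collect" by the helpers below.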


def run_and_read_all(run_lambda, command):
    """Runs command using run_lambda; reads and returns entire output if rc is 0."""
    rc, out, _ = run_lambda(command)
    if rc != 0:
        return None
    return out


def run_and_parse_first_match(run_lambda, command, regex):
    """Runs command using run_lambda, returns the first regex match if it exists."""
    rc, out, _ = run_lambda(command)
    if rc != 0:
        return None
    match = re.search(regex, out)
    if match is None:
        return None
    return match.group(1)


def run_and_return_first_line(run_lambda, command):
    """Runs command using run_lambda and returns first line if output is not empty."""
    rc, out, _ = run_lambda(command)
    if rc != 0:
        return None
    return out.split('\n')[0]
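
# These helpers are what the version probes below build on. For instance,
#   run_and_parse_first_match(run, 'gcc --version', r'gcc (.*)')
# yields the first capture group of the first match, or None on any failure.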


def get_conda_packages(run_lambda):
    conda = os.environ.get('CONDA_EXE', 'conda')
    out = run_and_read_all(run_lambda, "{} list".format(conda))
    if out is None:
        return out

    return "\n".join(
        line
        for line in out.splitlines()
        if not line.startswith("#")
        and any(
            name in line
            for name in {
                "torch",
                "numpy",
                "cudatoolkit",
                "soumith",
                "mkl",
                "magma",
            }
        )
    )


def get_gcc_version(run_lambda):
    return run_and_parse_first_match(run_lambda, 'gcc --version', r'gcc (.*)')


def get_clang_version(run_lambda):
    return run_and_parse_first_match(run_lambda, 'clang --version', r'clang version (.*)')


def get_cmake_version(run_lambda):
    return run_and_parse_first_match(run_lambda, 'cmake --version', r'cmake (.*)')


def get_nvidia_driver_version(run_lambda):
    if get_platform() == 'darwin':
        cmd = 'kextstat | grep -i cuda'
        return run_and_parse_first_match(run_lambda, cmd,
                                         r'com[.]nvidia[.]CUDA [(](.*?)[)]')
    smi = get_nvidia_smi()
    return run_and_parse_first_match(run_lambda, smi, r'Driver Version: (.*?) ')


def get_gpu_info(run_lambda):
    if get_platform() == 'darwin' or (TORCH_AVAILABLE and hasattr(torch.version, 'hip') and torch.version.hip is not None):
        if TORCH_AVAILABLE and torch.cuda.is_available():
            return torch.cuda.get_device_name(None)
        return None
    smi = get_nvidia_smi()
    uuid_regex = re.compile(r' \(UUID: .+?\)')
    rc, out, _ = run_lambda(smi + ' -L')
    if rc != 0:
        return None
    # Anonymize GPUs by removing their UUID
    return re.sub(uuid_regex, '', out)


def get_running_cuda_version(run_lambda):
    return run_and_parse_first_match(run_lambda, 'nvcc --version', r'release .+ V(.*)')


def get_cudnn_version(run_lambda):
    """This will return a list of libcudnn.so; it's hard to tell which one is being used."""
    if get_platform() == 'win32':
        system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows')
        cuda_path = os.environ.get('CUDA_PATH', "%CUDA_PATH%")
        where_cmd = os.path.join(system_root, 'System32', 'where')
        cudnn_cmd = '{} /R "{}\\bin" cudnn*.dll'.format(where_cmd, cuda_path)
    elif get_platform() == 'darwin':
        # CUDA libraries and drivers can be found in /usr/local/cuda/. See
        # https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#install
        # https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html#installmac
        # Use CUDNN_LIBRARY when cudnn library is installed elsewhere.
        cudnn_cmd = 'ls /usr/local/cuda/lib/libcudnn*'
    else:
        cudnn_cmd = 'ldconfig -p | grep libcudnn | rev | cut -d" " -f1 | rev'
    rc, out, _ = run_lambda(cudnn_cmd)
    # find will return 1 if there are permission errors or if not found
    if len(out) == 0 or (rc != 1 and rc != 0):
        l = os.environ.get('CUDNN_LIBRARY')
        if l is not None and os.path.isfile(l):
            return os.path.realpath(l)
        return None
    files_set = set()
    for fn in out.split('\n'):
        fn = os.path.realpath(fn)  # eliminate symbolic links
        if os.path.isfile(fn):
            files_set.add(fn)
    if not files_set:
        return None
    # Alphabetize the result because the order is non-deterministic otherwise
    files = sorted(files_set)
    if len(files) == 1:
        return files[0]
    result = '\n'.join(files)
    return 'Probably one of the following:\n{}'.format(result)


def get_nvidia_smi():
    # Note: nvidia-smi is currently available only on Windows and Linux
    smi = 'nvidia-smi'
    if get_platform() == 'win32':
        system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows')
        program_files_root = os.environ.get('PROGRAMFILES', 'C:\\Program Files')
        legacy_path = os.path.join(program_files_root, 'NVIDIA Corporation', 'NVSMI', smi)
        new_path = os.path.join(system_root, 'System32', smi)
        smis = [new_path, legacy_path]
        for candidate_smi in smis:
            if os.path.exists(candidate_smi):
                smi = '"{}"'.format(candidate_smi)
                break
    return smi


# example outputs of CPU infos
#  * linux
#    Architecture:            x86_64
#    CPU op-mode(s):          32-bit, 64-bit
#    Address sizes:           46 bits physical, 48 bits virtual
#    Byte Order:              Little Endian
#    CPU(s):                  128
#    On-line CPU(s) list:     0-127
#    Vendor ID:               GenuineIntel
#    Model name:              Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
#    CPU family:              6
#    Model:                   106
#    Thread(s) per core:      2
#    Core(s) per socket:      32
#    Socket(s):               2
#    Stepping:                6
#    BogoMIPS:                5799.78
#    Flags:                   fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr
#                             sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl
#                             xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq monitor ssse3 fma cx16
#                             pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand
#                             hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced
#                             fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap
#                             avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1
#                             xsaves wbnoinvd ida arat avx512vbmi pku ospke avx512_vbmi2 gfni vaes vpclmulqdq
#                             avx512_vnni avx512_bitalg tme avx512_vpopcntdq rdpid md_clear flush_l1d arch_capabilities
#    Virtualization features:
#      Hypervisor vendor:     KVM
#      Virtualization type:   full
#    Caches (sum of all):
#      L1d:                   3 MiB (64 instances)
#      L1i:                   2 MiB (64 instances)
#      L2:                    80 MiB (64 instances)
#      L3:                    108 MiB (2 instances)
#    NUMA:
#      NUMA node(s):          2
#      NUMA node0 CPU(s):     0-31,64-95
#      NUMA node1 CPU(s):     32-63,96-127
#    Vulnerabilities:
#      Itlb multihit:         Not affected
#      L1tf:                  Not affected
#      Mds:                   Not affected
#      Meltdown:              Not affected
#      Mmio stale data:       Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown
#      Retbleed:              Not affected
#      Spec store bypass:     Mitigation; Speculative Store Bypass disabled via prctl and seccomp
#      Spectre v1:            Mitigation; usercopy/swapgs barriers and __user pointer sanitization
#      Spectre v2:            Mitigation; Enhanced IBRS, IBPB conditional, RSB filling, PBRSB-eIBRS SW sequence
#      Srbds:                 Not affected
#      Tsx async abort:       Not affected
#  * win32
#    Architecture=9
#    CurrentClockSpeed=2900
#    DeviceID=CPU0
#    Family=179
#    L2CacheSize=40960
#    L2CacheSpeed=
#    Manufacturer=GenuineIntel
#    MaxClockSpeed=2900
#    Name=Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
#    ProcessorType=3
#    Revision=27142
#
#    Architecture=9
#    CurrentClockSpeed=2900
#    DeviceID=CPU1
#    Family=179
#    L2CacheSize=40960
#    L2CacheSpeed=
#    Manufacturer=GenuineIntel
#    MaxClockSpeed=2900
#    Name=Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
#    ProcessorType=3
#    Revision=27142
def get_cpu_info(run_lambda):
    rc, out, err = 0, '', ''
    if get_platform() == 'linux':
        rc, out, err = run_lambda('lscpu')
    elif get_platform() == 'win32':
        rc, out, err = run_lambda('wmic cpu get Name,Manufacturer,Family,Architecture,ProcessorType,DeviceID, \
        CurrentClockSpeed,MaxClockSpeed,L2CacheSize,L2CacheSpeed,Revision /VALUE')
    elif get_platform() == 'darwin':
        rc, out, err = run_lambda("sysctl -n machdep.cpu.brand_string")
    cpu_info = 'None'
    if rc == 0:
        cpu_info = out
    else:
        cpu_info = err
    return cpu_info


def get_platform():
    if sys.platform.startswith('linux'):
        return 'linux'
    elif sys.platform.startswith('win32'):
        return 'win32'
    elif sys.platform.startswith('cygwin'):
        return 'cygwin'
    elif sys.platform.startswith('darwin'):
        return 'darwin'
    else:
        return sys.platform


def get_mac_version(run_lambda):
    return run_and_parse_first_match(run_lambda, 'sw_vers -productVersion', r'(.*)')


def get_windows_version(run_lambda):
    system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows')
    wmic_cmd = os.path.join(system_root, 'System32', 'Wbem', 'wmic')
    findstr_cmd = os.path.join(system_root, 'System32', 'findstr')
    return run_and_read_all(run_lambda, '{} os get Caption | {} /v Caption'.format(wmic_cmd, findstr_cmd))


def get_lsb_version(run_lambda):
    return run_and_parse_first_match(run_lambda, 'lsb_release -a', r'Description:\t(.*)')


def check_release_file(run_lambda):
    return run_and_parse_first_match(run_lambda, 'cat /etc/*-release',
                                     r'PRETTY_NAME="(.*)"')


def get_os(run_lambda):
    from platform import machine
    platform = get_platform()

    if platform == 'win32' or platform == 'cygwin':
        return get_windows_version(run_lambda)

    if platform == 'darwin':
        version = get_mac_version(run_lambda)
        if version is None:
            return None
        return 'macOS {} ({})'.format(version, machine())

    if platform == 'linux':
        # Ubuntu/Debian based
        desc = get_lsb_version(run_lambda)
        if desc is not None:
            return '{} ({})'.format(desc, machine())

        # Try reading /etc/*-release
        desc = check_release_file(run_lambda)
        if desc is not None:
            return '{} ({})'.format(desc, machine())

        return '{} ({})'.format(platform, machine())

    # Unknown platform
    return platform


def get_python_platform():
    import platform
    return platform.platform()


def get_libc_version():
    import platform
    if get_platform() != 'linux':
        return 'N/A'
    return '-'.join(platform.libc_ver())


def get_pip_packages(run_lambda):
    """Returns `pip list` output. Note: will also find conda-installed pytorch
    and numpy packages."""
    # People generally have `pip` as `pip` or `pip3`
    # But here it is invoked as `python -mpip`
    def run_with_pip(pip):
        out = run_and_read_all(run_lambda, "{} list --format=freeze".format(pip))
        if out is None:
            # `pip list` failed; report this field as not collectable
            return None
        return "\n".join(
            line
            for line in out.splitlines()
            if any(
                name in line
                for name in {
                    "torch",
                    "numpy",
                    "mypy",
                }
            )
        )

    pip_version = 'pip3' if sys.version[0] == '3' else 'pip'
    out = run_with_pip(sys.executable + ' -mpip')
    return pip_version, out
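
# The returned pair might look like (illustrative values only):
#   ('pip3', 'numpy==1.24.3\ntorch==2.0.1')
# with None as the second element if the `pip list` invocation failed.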


def get_cachingallocator_config():
    ca_config = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', '')
    return ca_config


def get_cuda_module_loading_config():
    if TORCH_AVAILABLE and torch.cuda.is_available():
        torch.cuda.init()
        config = os.environ.get('CUDA_MODULE_LOADING', '')
        return config
    else:
        return "N/A"


def is_xnnpack_available():
    if TORCH_AVAILABLE:
        import torch.backends.xnnpack
        return str(torch.backends.xnnpack.enabled)  # type: ignore[attr-defined]
    else:
        return "N/A"


def get_env_info():
    run_lambda = run
    pip_version, pip_list_output = get_pip_packages(run_lambda)

    if TORCH_AVAILABLE:
        version_str = torch.__version__
        debug_mode_str = str(torch.version.debug)
        cuda_available_str = str(torch.cuda.is_available())
        cuda_version_str = torch.version.cuda
        if not hasattr(torch.version, 'hip') or torch.version.hip is None:  # cuda version
            hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A'
        else:  # HIP version
            cfg = torch._C._show_config().split('\n')
            hip_runtime_version = [s.rsplit(None, 1)[-1] for s in cfg if 'HIP Runtime' in s][0]
            miopen_runtime_version = [s.rsplit(None, 1)[-1] for s in cfg if 'MIOpen' in s][0]
            cuda_version_str = 'N/A'
            hip_compiled_version = torch.version.hip
    else:
        version_str = debug_mode_str = cuda_available_str = cuda_version_str = 'N/A'
        hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A'

    sys_version = sys.version.replace("\n", " ")

    return SystemEnv(
        torch_version=version_str,
        is_debug_build=debug_mode_str,
        python_version='{} ({}-bit runtime)'.format(sys_version, sys.maxsize.bit_length() + 1),
        python_platform=get_python_platform(),
        is_cuda_available=cuda_available_str,
        cuda_compiled_version=cuda_version_str,
        cuda_runtime_version=get_running_cuda_version(run_lambda),
        cuda_module_loading=get_cuda_module_loading_config(),
        nvidia_gpu_models=get_gpu_info(run_lambda),
        nvidia_driver_version=get_nvidia_driver_version(run_lambda),
        cudnn_version=get_cudnn_version(run_lambda),
        hip_compiled_version=hip_compiled_version,
        hip_runtime_version=hip_runtime_version,
        miopen_runtime_version=miopen_runtime_version,
        pip_version=pip_version,
        pip_packages=pip_list_output,
        conda_packages=get_conda_packages(run_lambda),
        os=get_os(run_lambda),
        libc_version=get_libc_version(),
        gcc_version=get_gcc_version(run_lambda),
        clang_version=get_clang_version(run_lambda),
        cmake_version=get_cmake_version(run_lambda),
        caching_allocator_config=get_cachingallocator_config(),
        is_xnnpack_available=is_xnnpack_available(),
        cpu_info=get_cpu_info(run_lambda),
    )


env_info_fmt = """
PyTorch version: {torch_version}
Is debug build: {is_debug_build}
CUDA used to build PyTorch: {cuda_compiled_version}
ROCM used to build PyTorch: {hip_compiled_version}

OS: {os}
GCC version: {gcc_version}
Clang version: {clang_version}
CMake version: {cmake_version}
Libc version: {libc_version}

Python version: {python_version}
Python platform: {python_platform}
Is CUDA available: {is_cuda_available}
CUDA runtime version: {cuda_runtime_version}
CUDA_MODULE_LOADING set to: {cuda_module_loading}
GPU models and configuration: {nvidia_gpu_models}
Nvidia driver version: {nvidia_driver_version}
cuDNN version: {cudnn_version}
HIP runtime version: {hip_runtime_version}
MIOpen runtime version: {miopen_runtime_version}
Is XNNPACK available: {is_xnnpack_available}

CPU:
{cpu_info}

Versions of relevant libraries:
{pip_packages}
{conda_packages}
""".strip()


def pretty_str(envinfo):
    def replace_nones(dct, replacement='Could not collect'):
        for key in dct.keys():
            if dct[key] is not None:
                continue
            dct[key] = replacement
        return dct

    def replace_bools(dct, true='Yes', false='No'):
        for key in dct.keys():
            if dct[key] is True:
                dct[key] = true
            elif dct[key] is False:
                dct[key] = false
        return dct

    def prepend(text, tag='[prepend]'):
        lines = text.split('\n')
        updated_lines = [tag + line for line in lines]
        return '\n'.join(updated_lines)

    def replace_if_empty(text, replacement='No relevant packages'):
        if text is not None and len(text) == 0:
            return replacement
        return text

    def maybe_start_on_next_line(string):
        # If `string` is multiline, prepend a \n to it.
        if string is not None and len(string.split('\n')) > 1:
            return '\n{}\n'.format(string)
        return string

    mutable_dict = envinfo._asdict()

    # If nvidia_gpu_models is multiline, start on the next line
    mutable_dict['nvidia_gpu_models'] = \
        maybe_start_on_next_line(envinfo.nvidia_gpu_models)

    # If the machine doesn't have CUDA, report some fields as 'No CUDA'
    dynamic_cuda_fields = [
        'cuda_runtime_version',
        'nvidia_gpu_models',
        'nvidia_driver_version',
    ]
    all_cuda_fields = dynamic_cuda_fields + ['cudnn_version']
    all_dynamic_cuda_fields_missing = all(
        mutable_dict[field] is None for field in dynamic_cuda_fields)
    if TORCH_AVAILABLE and not torch.cuda.is_available() and all_dynamic_cuda_fields_missing:
        for field in all_cuda_fields:
            mutable_dict[field] = 'No CUDA'
        if envinfo.cuda_compiled_version is None:
            mutable_dict['cuda_compiled_version'] = 'None'

    # Replace True with Yes, False with No
    mutable_dict = replace_bools(mutable_dict)

    # Replace all None objects with 'Could not collect'
    mutable_dict = replace_nones(mutable_dict)

    # If either of these are '', replace with 'No relevant packages'
    mutable_dict['pip_packages'] = replace_if_empty(mutable_dict['pip_packages'])
    mutable_dict['conda_packages'] = replace_if_empty(mutable_dict['conda_packages'])

    # Tag conda and pip packages with a prefix
    # If they were previously None, they'll show up as ie '[conda] Could not collect'
    if mutable_dict['pip_packages']:
        mutable_dict['pip_packages'] = prepend(mutable_dict['pip_packages'],
                                               '[{}] '.format(envinfo.pip_version))
    if mutable_dict['conda_packages']:
        mutable_dict['conda_packages'] = prepend(mutable_dict['conda_packages'],
                                                 '[conda] ')
    mutable_dict['cpu_info'] = envinfo.cpu_info
    return env_info_fmt.format(**mutable_dict)


def get_pretty_env_info():
    return pretty_str(get_env_info())


def main():
    print("Collecting environment information...")
    output = get_pretty_env_info()
    print(output)

    if TORCH_AVAILABLE and hasattr(torch, 'utils') and hasattr(torch.utils, '_crash_handler'):
        minidump_dir = torch.utils._crash_handler.DEFAULT_MINIDUMP_DIR
        if sys.platform == "linux" and os.path.exists(minidump_dir):
            dumps = [os.path.join(minidump_dir, dump) for dump in os.listdir(minidump_dir)]
            if dumps:  # guard against an empty minidump directory
                latest = max(dumps, key=os.path.getctime)
                ctime = os.path.getctime(latest)
                creation_time = datetime.datetime.fromtimestamp(ctime).strftime('%Y-%m-%d %H:%M:%S')
                msg = "\n*** Detected a minidump at {} created on {}, ".format(latest, creation_time) + \
                      "if this is related to your bug please include it when you file a report ***"
                print(msg, file=sys.stderr)


if __name__ == '__main__':
    main()
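
# A minimal sketch of programmatic use (assuming this file is importable as
# `collect_env` on your path; field values are illustrative only):
#
#     from collect_env import get_env_info, pretty_str
#     info = get_env_info()        # SystemEnv namedtuple, one entry per field above
#     print(info.torch_version)    # e.g. '2.0.1', or 'N/A' when torch is absent
#     print(pretty_str(info))      # the same human-readable report main() prints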