# blas_compare.py — benchmark torch.mm across BLAS backends (MKL / OpenBLAS)
# by running each trial in a core-pinned conda-env subprocess.
  1. import argparse
  2. import datetime
  3. import itertools as it
  4. import multiprocessing
  5. import multiprocessing.dummy
  6. import os
  7. import queue
  8. import pickle
  9. import shutil
  10. import subprocess
  11. import sys
  12. import tempfile
  13. import threading
  14. import time
  15. from typing import Tuple, Dict
  16. from . import blas_compare_setup
# Benchmark tuning knobs.
MIN_RUN_TIME = 1  # seconds; passed to Timer.blocked_autorange(min_run_time=...)
NUM_REPLICATES = 20  # independent seeds per (BLAS config, thread setting) cell
NUM_THREAD_SETTINGS = (1, 2, 4)  # intra-op thread counts to sweep

# Aggregated pickled results are appended here; per-run pickles live in SCRATCH_DIR.
RESULT_FILE = os.path.join(blas_compare_setup.WORKING_ROOT, "blas_results.pkl")
SCRATCH_DIR = os.path.join(blas_compare_setup.WORKING_ROOT, "scratch")

# (sub_label, env name under WORKING_ROOT, extra env vars or None) per BLAS build.
BLAS_CONFIGS = (
    ("MKL (2020.3)", blas_compare_setup.MKL_2020_3, None),
    ("MKL (2020.0)", blas_compare_setup.MKL_2020_0, None),
    ("OpenBLAS", blas_compare_setup.OPEN_BLAS, None)
)

# Serializes appends to RESULT_FILE across the thread pool in main().
_RESULT_FILE_LOCK = threading.Lock()

# Each entry reserves (cpu list string for taskset, scratch result file, num_threads).
_WORKER_POOL: queue.Queue[Tuple[str, str, int]] = queue.Queue()
  29. def clear_worker_pool():
  30. while not _WORKER_POOL.empty():
  31. _, result_file, _ = _WORKER_POOL.get_nowait()
  32. os.remove(result_file)
  33. if os.path.exists(SCRATCH_DIR):
  34. shutil.rmtree(SCRATCH_DIR)
  35. def fill_core_pool(n: int):
  36. clear_worker_pool()
  37. os.makedirs(SCRATCH_DIR)
  38. # Reserve two cores so that bookkeeping does not interfere with runs.
  39. cpu_count = multiprocessing.cpu_count() - 2
  40. # Adjacent cores sometimes share cache, so we space out single core runs.
  41. step = max(n, 2)
  42. for i in range(0, cpu_count, step):
  43. core_str = f"{i}" if n == 1 else f"{i},{i + n - 1}"
  44. _, result_file = tempfile.mkstemp(suffix=".pkl", prefix=SCRATCH_DIR)
  45. _WORKER_POOL.put((core_str, result_file, n))
  46. def _subprocess_main(seed=0, num_threads=1, sub_label="N/A", result_file=None, env=None):
  47. import torch
  48. from torch.utils.benchmark import Timer
  49. conda_prefix = os.getenv("CONDA_PREFIX")
  50. assert conda_prefix
  51. if not torch.__file__.startswith(conda_prefix):
  52. raise ValueError(
  53. f"PyTorch mismatch: `import torch` resolved to `{torch.__file__}`, "
  54. f"which is not in the correct conda env: {conda_prefix}"
  55. )
  56. torch.manual_seed(seed)
  57. results = []
  58. for n in [4, 8, 16, 32, 64, 128, 256, 512, 1024, 7, 96, 150, 225]:
  59. dtypes = (("Single", torch.float32), ("Double", torch.float64))
  60. shapes = (
  61. # Square MatMul
  62. ((n, n), (n, n), "(n x n) x (n x n)", "Matrix-Matrix Product"),
  63. # Matrix-Vector product
  64. ((n, n), (n, 1), "(n x n) x (n x 1)", "Matrix-Vector Product"),
  65. )
  66. for (dtype_name, dtype), (x_shape, y_shape, shape_str, blas_type) in it.product(dtypes, shapes):
  67. t = Timer(
  68. stmt="torch.mm(x, y)",
  69. label=f"torch.mm {shape_str} {blas_type} ({dtype_name})",
  70. sub_label=sub_label,
  71. description=f"n = {n}",
  72. env=os.path.split(env or "")[1] or None,
  73. globals={
  74. "x": torch.rand(x_shape, dtype=dtype),
  75. "y": torch.rand(y_shape, dtype=dtype),
  76. },
  77. num_threads=num_threads,
  78. ).blocked_autorange(min_run_time=MIN_RUN_TIME)
  79. results.append(t)
  80. if result_file is not None:
  81. with open(result_file, "wb") as f:
  82. pickle.dump(results, f)
  83. def run_subprocess(args):
  84. seed, env, sub_label, extra_env_vars = args
  85. core_str = None
  86. try:
  87. core_str, result_file, num_threads = _WORKER_POOL.get()
  88. with open(result_file, "wb"):
  89. pass
  90. env_vars: Dict[str, str] = {
  91. "PATH": os.getenv("PATH") or "",
  92. "PYTHONPATH": os.getenv("PYTHONPATH") or "",
  93. # NumPy
  94. "OMP_NUM_THREADS": str(num_threads),
  95. "MKL_NUM_THREADS": str(num_threads),
  96. "NUMEXPR_NUM_THREADS": str(num_threads),
  97. }
  98. env_vars.update(extra_env_vars or {})
  99. subprocess.run(
  100. f"source activate {env} && "
  101. f"taskset --cpu-list {core_str} "
  102. f"python {os.path.abspath(__file__)} "
  103. "--DETAIL-in-subprocess "
  104. f"--DETAIL-seed {seed} "
  105. f"--DETAIL-num-threads {num_threads} "
  106. f"--DETAIL-sub-label '{sub_label}' "
  107. f"--DETAIL-result-file {result_file} "
  108. f"--DETAIL-env {env}",
  109. env=env_vars,
  110. stdout=subprocess.PIPE,
  111. shell=True
  112. )
  113. with open(result_file, "rb") as f:
  114. result_bytes = f.read()
  115. with _RESULT_FILE_LOCK, \
  116. open(RESULT_FILE, "ab") as f:
  117. f.write(result_bytes)
  118. except KeyboardInterrupt:
  119. pass # Handle ctrl-c gracefully.
  120. finally:
  121. if core_str is not None:
  122. _WORKER_POOL.put((core_str, result_file, num_threads))
  123. def _compare_main():
  124. results = []
  125. with open(RESULT_FILE, "rb") as f:
  126. while True:
  127. try:
  128. results.extend(pickle.load(f))
  129. except EOFError:
  130. break
  131. from torch.utils.benchmark import Compare
  132. comparison = Compare(results)
  133. comparison.trim_significant_figures()
  134. comparison.colorize()
  135. comparison.print()
  136. def main():
  137. with open(RESULT_FILE, "wb"):
  138. pass
  139. for num_threads in NUM_THREAD_SETTINGS:
  140. fill_core_pool(num_threads)
  141. workers = _WORKER_POOL.qsize()
  142. trials = []
  143. for seed in range(NUM_REPLICATES):
  144. for sub_label, env, extra_env_vars in BLAS_CONFIGS:
  145. env_path = os.path.join(blas_compare_setup.WORKING_ROOT, env)
  146. trials.append((seed, env_path, sub_label, extra_env_vars))
  147. n = len(trials)
  148. with multiprocessing.dummy.Pool(workers) as pool:
  149. start_time = time.time()
  150. for i, r in enumerate(pool.imap(run_subprocess, trials)):
  151. n_trials_done = i + 1
  152. time_per_result = (time.time() - start_time) / n_trials_done
  153. eta = int((n - n_trials_done) * time_per_result)
  154. print(f"\r{i + 1} / {n} ETA:{datetime.timedelta(seconds=eta)}".ljust(80), end="")
  155. sys.stdout.flush()
  156. print(f"\r{n} / {n} Total time: {datetime.timedelta(seconds=int(time.time() - start_time))}")
  157. print()
  158. # Any env will do, it just needs to have torch for benchmark utils.
  159. env_path = os.path.join(blas_compare_setup.WORKING_ROOT, BLAS_CONFIGS[0][1])
  160. subprocess.run(
  161. f"source activate {env_path} && "
  162. f"python {os.path.abspath(__file__)} "
  163. "--DETAIL-in-compare",
  164. shell=True
  165. )
  166. if __name__ == "__main__":
  167. # These flags are for subprocess control, not controlling the main loop.
  168. parser = argparse.ArgumentParser()
  169. parser.add_argument("--DETAIL-in-subprocess", "--DETAIL_in_subprocess", action="store_true")
  170. parser.add_argument("--DETAIL-in-compare", "--DETAIL_in_compare", action="store_true")
  171. parser.add_argument("--DETAIL-seed", "--DETAIL_seed", type=int, default=None)
  172. parser.add_argument("--DETAIL-num-threads", "--DETAIL_num_threads", type=int, default=None)
  173. parser.add_argument("--DETAIL-sub-label", "--DETAIL_sub_label", type=str, default="N/A")
  174. parser.add_argument("--DETAIL-result-file", "--DETAIL_result_file", type=str, default=None)
  175. parser.add_argument("--DETAIL-env", "--DETAIL_env", type=str, default=None)
  176. args = parser.parse_args()
  177. if args.DETAIL_in_subprocess:
  178. try:
  179. _subprocess_main(
  180. args.DETAIL_seed,
  181. args.DETAIL_num_threads,
  182. args.DETAIL_sub_label,
  183. args.DETAIL_result_file,
  184. args.DETAIL_env,
  185. )
  186. except KeyboardInterrupt:
  187. pass # Handle ctrl-c gracefully.
  188. elif args.DETAIL_in_compare:
  189. _compare_main()
  190. else:
  191. main()