config.py

from typing import Dict, Union

from torchgen.model import NativeFunctionsGroup, NativeFunctionsViewGroup


def func_name_base_str(g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup]) -> str:
    """Return the base name of the operator that the group represents."""
    if isinstance(g, NativeFunctionsGroup):
        return str(g.functional.func.name.name.base)
    else:
        return str(g.view.root_name)


# Base names of ops whose implementations are hand-written rather than generated.
is_hand_written_ops_ = frozenset(
    (
        "abs",
        "add",
        "addmm",
        "all",
        "any",
        "argmin",
        "bmm",
        "clamp",
        "clamp_min",
        "cumsum",
        "div",
        "fmod",
        "index_select",
        "leaky_relu",
        "linear",
        "log",
        "matmul",
        "mul",
        "narrow_copy",
        "nonzero",
        "pow",
        "remainder",
        "sigmoid",
        "sign",
        "sub",
        "tanh",
        "detach",
        "expand_as",
        "flatten",
        "narrow",
        "reshape_as",
        "select",
        "slice",
        "softmax",
        "split",
        "squeeze",
        "transpose",
        "view",
        "where",
    )
)


def is_hand_written(g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup]) -> bool:
    """Return True if the op's implementation is hand-written rather than generated."""
    name_base = func_name_base_str(g)
    return name_base in is_hand_written_ops_
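
# Illustrative example (not part of the original module): for the group built
# from the "add" overloads, func_name_base_str(g) returns "add", which appears
# in is_hand_written_ops_, so is_hand_written(g) is True; for "addr" it is False.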


def override_test_values(arg_map: Dict[str, str], op_name: str, index: int) -> None:
    """Override the generated test inputs for ops that need specific shapes or values.

    `arg_map` maps argument names to C++ initializer expressions (as strings);
    `index` selects which of the two generated test cases (0 or 1) is being built.
    """
    assert index == 0 or index == 1
    if op_name == "addr":
        if index == 0:
            arg_map["self"] = "at::rand({6, 6})"
            arg_map["vec1"] = "at::rand({6})"
            arg_map["vec2"] = "at::rand({6})"
        else:
            arg_map["self"] = "at::rand({22, 22})"
            arg_map["vec1"] = "at::rand({22})"
            arg_map["vec2"] = "at::rand({22})"
        return
    if op_name == "mv":
        if index == 0:
            arg_map["self"] = "at::rand({6, 6})"
            arg_map["vec"] = "at::rand({6})"
        else:
            arg_map["self"] = "at::rand({22, 22})"
            arg_map["vec"] = "at::rand({22})"
        return
    if op_name == "addbmm":
        if index == 0:
            arg_map["self"] = "at::rand({6, 6})"
        else:
            arg_map["self"] = "at::rand({22, 22})"
        return
    if op_name == "cross":
        if index == 0:
            arg_map["self"] = "at::rand({3, 3, 3})"
            arg_map["other"] = "at::rand({3, 3, 3})"
        else:
            arg_map["self"] = "at::rand({22, 3, 22})"
            arg_map["other"] = "at::rand({22, 3, 22})"
        return
    if op_name == "take":
        if index == 0:
            arg_map["index"] = "at::randint(0, 216, {20}, torch::kInt64)"
        else:
            arg_map["index"] = "at::randint(0, 1000, {100}, torch::kInt64)"
        return
    if op_name == "take_along_dim":
        if index == 0:
            arg_map["indices"] = "at::argsort(self0, 1, true)"
        else:
            arg_map["indices"] = "at::argsort(self1, 1, true)"
        return
    if op_name == "masked_select":
        if index == 0:
            arg_map["mask"] = "at::randn({6, 6, 6}) > 0.5"
        else:
            arg_map["mask"] = "at::rand({22, 22, 22}) > 0.5"
        return
    if op_name == "orgqr":
        if index == 0:
            arg_map["input2"] = "at::rand({6, 6})"
        else:
            arg_map["input2"] = "at::rand({22, 22})"
        return
    if op_name == "ormqr":
        if index == 0:
            arg_map["input2"] = "at::rand({6, 6})"
        else:
            arg_map["input2"] = "at::rand({22, 22})"
        return
    if op_name == "quantile":
        if index == 0:
            arg_map["q"] = "at::rand({6})"
            arg_map["interpolation"] = '"linear"'
        else:
            arg_map["q"] = "at::rand({22})"
            arg_map["interpolation"] = '"linear"'
        return
    if op_name == "nanquantile":
        if index == 0:
            arg_map["q"] = "at::rand({6})"
            arg_map["interpolation"] = '"linear"'
        else:
            arg_map["q"] = "at::rand({22})"
            arg_map["interpolation"] = '"linear"'
        return
    if op_name == "multi_margin_loss":
        if index == 0:
            arg_map["self"] = "at::rand({6, 6})"
            arg_map["target"] = "at::randint(6, {6}, torch::kInt64)"
            arg_map["weight"] = "at::rand({6})"
        else:
            arg_map["self"] = "at::rand({22, 22})"
            arg_map["target"] = "at::randint(22, {22}, torch::kInt64)"
            arg_map["weight"] = "at::rand({22})"
        return
    if op_name == "multilabel_margin_loss":
        if index == 0:
            arg_map["self"] = "at::rand({6, 6})"
            arg_map["target"] = "at::randint(6, {6, 6}, torch::kInt64)"
        else:
            arg_map["self"] = "at::rand({22, 22})"
            arg_map["target"] = "at::randint(22, {22, 22}, torch::kInt64)"
        return
    if op_name == "nll_loss":
        if index == 0:
            arg_map["self"] = "at::rand({6, 6})"
            arg_map["target"] = "at::randint(6, {6}, torch::kInt64)"
            arg_map["weight"] = "at::rand({6})"
        else:
            arg_map["self"] = "at::rand({22, 22})"
            arg_map["target"] = "at::randint(22, {22}, torch::kInt64)"
            arg_map["weight"] = "at::rand({22})"
        return
    if op_name == "nll_loss2d":
        if index == 0:
            arg_map["self"] = "at::rand({6, 6, 6, 6})"
            arg_map["target"] = "at::randint(6, {6, 6, 6}, torch::kInt64)"
            arg_map["weight"] = "at::rand({6})"
        else:
            arg_map["self"] = "at::rand({22, 22, 22, 22})"
            arg_map["target"] = "at::randint(22, {22, 22, 22}, torch::kInt64)"
            arg_map["weight"] = "at::rand({22})"
        return
    if op_name in (
        "fft_fft",
        "fft_ifft",
        "fft_rfft",
        "fft_irfft",
        "fft_hfft",
        "fft_ihfft",
    ):
        arg_map["norm"] = '"forward"'
        return
    if op_name == "linalg_tensorinv":
        if index == 0:
            arg_map["self"] = "at::rand({6, 6, 6, 6})"
            arg_map["ind"] = "2"
        else:
            arg_map["self"] = "at::rand({22, 22, 22, 22})"
            arg_map["ind"] = "2"
        return
    if op_name == "addmv":
        if index == 0:
            arg_map["self"] = "at::rand({2})"
            arg_map["mat"] = "at::rand({2, 2})"
            arg_map["vec"] = "at::rand({2})"
        else:
            arg_map["self"] = "at::rand({35})"
            arg_map["mat"] = "at::rand({35, 35})"
            arg_map["vec"] = "at::rand({35})"
        return
    if op_name == "acosh":
        if index == 0:
            arg_map["self"] = "at::rand({2, 2, 2}) + at::ones({2, 2, 2})"
        else:
            arg_map["self"] = "at::rand({5, 5, 5}) + at::ones({5, 5, 5})"
        return
    if op_name == "adaptive_max_pool2d_backward":
        if index == 0:
            arg_map["grad_output"] = "at::rand({2, 2, 2}, at::kFloat)"
            arg_map["self"] = "at::rand({2, 2, 2}, at::kFloat)"
            arg_map["indices"] = "at::randint(0, 1, {2, 2, 2}, at::kLong)"
        else:
            arg_map["grad_output"] = "at::rand({3, 3, 3}, at::kFloat)"
            arg_map["self"] = "at::rand({3, 3, 3}, at::kFloat)"
            arg_map["indices"] = "at::randint(0, 1, {3, 3, 3}, at::kLong)"
        return
    if op_name == "adaptive_max_pool3d_backward":
        if index == 0:
            arg_map["grad_output"] = "at::rand({2, 2, 2, 2}, at::kFloat)"
            arg_map["self"] = "at::rand({2, 2, 2, 2}, at::kFloat)"
            arg_map["indices"] = "at::randint(0, 1, {2, 2, 2, 2}, at::kLong)"
        else:
            arg_map["grad_output"] = "at::rand({3, 3, 3, 3}, at::kFloat)"
            arg_map["self"] = "at::rand({3, 3, 3, 3}, at::kFloat)"
            arg_map["indices"] = "at::randint(0, 1, {3, 3, 3, 3}, at::kLong)"
        return
    if op_name == "bitwise_left_shift":
        if index == 0:
            arg_map["self"] = "at::randint(1, 1 << 4, {6, 6, 6}, at::kInt)"
            arg_map["other"] = "at::randint(1, 26, {6, 6, 6}, at::kInt)"
        else:
            arg_map["self"] = "at::randint(1, 1 << 4, {22, 22, 22}, at::kInt)"
            arg_map["other"] = "at::randint(1, 26, {22, 22, 22}, at::kInt)"
        return
    if op_name == "bitwise_right_shift":
        if index == 0:
            arg_map["self"] = "at::randint(1 << 21, 1 << 30, {6, 6, 6}, at::kInt)"
            arg_map["other"] = "at::randint(1, 22, {6, 6, 6}, at::kInt)"
        else:
            arg_map["self"] = "at::randint(1 << 21, 1 << 30, {22, 22, 22}, at::kInt)"
            arg_map["other"] = "at::randint(1, 22, {22, 22, 22}, at::kInt)"
        return
    if op_name == "gather":
        if index == 0:
            arg_map["self"] = "at::randint(1, 100, {2,2,2}, at::kInt)"
            arg_map["dim"] = "1"
            arg_map["index"] = "at::randint(0, 1, {2,2,2}, torch::kInt64)"
            arg_map["sparse_grad"] = "false"
        else:
            arg_map["self"] = "at::randint(1, 100, {5,5,5}, at::kInt)"
            arg_map["dim"] = "1"
            arg_map["index"] = "at::randint(0, 4, {5,5,5}, torch::kInt64)"
            arg_map["sparse_grad"] = "false"
        return
    if op_name == "gelu":
        if index == 0:
            arg_map["self"] = "at::rand({6, 6, 6})"
            arg_map["approximate"] = '"tanh"'
        else:
            arg_map["self"] = "at::rand({22, 22, 22})"
            arg_map["approximate"] = '"tanh"'
        return
    if op_name == "gelu_backward":
        if index == 0:
            arg_map["grad_output"] = "at::rand({6, 6, 6})"
            arg_map["self"] = "at::rand({6, 6, 6})"
            arg_map["approximate"] = '"tanh"'
        else:
            arg_map["grad_output"] = "at::rand({22, 22, 22})"
            arg_map["self"] = "at::rand({22, 22, 22})"
            arg_map["approximate"] = '"tanh"'
        return
    if op_name == "index_add":
        if index == 0:
            arg_map["self"] = "at::rand({2})"
            arg_map["dim"] = "0"
            arg_map["index"] = "at::randint(0, 1, {2}, at::kInt)"
            arg_map["source"] = "at::rand({2})"
            arg_map["alpha"] = "2"
        else:
            arg_map["self"] = "at::rand({16})"
            arg_map["dim"] = "0"
            arg_map["index"] = "at::randint(0, 10, {16}, at::kInt)"
            arg_map["source"] = "at::rand({16})"
            arg_map["alpha"] = "2"
        return
    if op_name == "index_copy":
        if index == 0:
            arg_map["self"] = "at::rand({2})"
            arg_map["dim"] = "0"
            arg_map["index"] = "at::randint(0, 1, {2}, at::kLong)"
            arg_map["source"] = "at::rand({2})"
        else:
            arg_map["self"] = "at::rand({32})"
            arg_map["dim"] = "0"
            arg_map["index"] = "at::randint(0, 10, {32}, at::kLong)"
            arg_map["source"] = "at::rand({32})"
        return
    if op_name == "linalg_cross":
        if index == 0:
            arg_map["self"] = "at::rand({6, 3, 6})"
            arg_map["other"] = "at::rand({6, 3, 6})"
            arg_map["dim"] = "1"
        else:
            arg_map["self"] = "at::rand({22, 3, 22})"
            arg_map["other"] = "at::rand({22, 3, 22})"
            arg_map["dim"] = "1"
        return
    if op_name == "nll_loss_backward":
        if index == 0:
            arg_map["grad_output"] = "at::rand({})"
            arg_map["self"] = "at::rand({6})"
            arg_map["target"] = "at::randint(0, 5, {6}, torch::kInt64)"
            arg_map["weight"] = "at::rand({6})"
            arg_map["reduction"] = "1"
            arg_map["ignore_index"] = "1"
            arg_map["total_weight"] = "at::rand({})"
        else:
            arg_map["grad_output"] = "at::rand({})"
            arg_map["self"] = "at::rand({36})"
            arg_map["target"] = "at::randint(0, 11, {36}, torch::kInt64)"
            arg_map["weight"] = "at::rand({36})"
            arg_map["reduction"] = "1"
            arg_map["ignore_index"] = "1"
            arg_map["total_weight"] = "at::rand({})"
        return
    if op_name in ["scatter", "scatter_add", "_scatter_reduce"]:
        if index == 0:
            arg_map["self"] = "at::randint(1, 100, {2,2,2}, torch::kInt64)"
            arg_map["index"] = "at::randint(0, 1, {2,2,2}, torch::kInt64)"
            arg_map["src"] = "at::randint(1, 100, {2,2,2}, torch::kInt64)"
        else:
            arg_map["self"] = "at::randint(1, 100, {5,5,5}, torch::kInt64)"
            arg_map["index"] = "at::randint(0, 1, {5,5,5}, torch::kInt64)"
            arg_map["src"] = "at::randint(1, 100, {5,5,5}, torch::kInt64)"
        if "reduce" in arg_map:
            arg_map["reduce"] = '"sum"' if op_name == "_scatter_reduce" else '"add"'
        return
    if op_name == "scatter_reduce":
        arg_map["reduce"] = '"mean"'
        if index == 0:
            arg_map["index"] = "at::randint(6, {6, 6, 6}, torch::kInt64)"
        else:
            arg_map["index"] = "at::randint(22, {22, 22, 22}, torch::kInt64)"
        return
    if op_name == "special_zeta":
        if index == 0:
            arg_map["self"] = "at::rand({2,2,2}, at::kDouble) + at::ones({2,2,2})"
            arg_map["other"] = "at::rand({2,2,2}, at::kDouble) + at::ones({2,2,2})"
        else:
            arg_map["self"] = "at::rand({5,5,5}, at::kDouble) + at::ones({5,5,5})"
            arg_map["other"] = "at::rand({5,5,5}, at::kDouble) + at::ones({5,5,5})"
        return
    if op_name == "_convert_indices_from_csr_to_coo":
        if index == 0:
            arg_map["crow_indices"] = "torch::tensor({1}, torch::kInt32)"
            arg_map["col_indices"] = "torch::tensor({0, 1, 0}, torch::kInt32)"
            arg_map["out_int32"] = "false"
        else:
            arg_map["crow_indices"] = "torch::tensor({0}, torch::kInt32)"
            arg_map[
                "col_indices"
            ] = "torch::tensor({0, 1, 0, 2, 1, 2, 0, 1, 0, 2, 1, 2}, torch::kInt32)"
            arg_map["out_int32"] = "false"
        return
    if op_name == "_convert_indices_from_coo_to_csr":
        if index == 0:
            arg_map["self"] = "at::randint(0, 3, {2}, at::kInt)"
            arg_map["size"] = "10"
            arg_map["out_int32"] = "false"
        else:
            arg_map["self"] = "at::randint(0, 3, {12}, at::kInt)"
            arg_map["size"] = "24"
            arg_map["out_int32"] = "false"
        return
    if op_name in ("diagonal", "linalg_diagonal"):
        arg_map["offset"] = "0"
        arg_map["dim0"] = "1"
        arg_map["dim1"] = "2"
        return
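

# Usage sketch (illustrative only, not part of the original module): the code
# generator is assumed to build a default arg_map of argument-name -> C++
# expression strings for each of its two test cases and then call
# override_test_values to patch the arguments that need specific shapes or values.
#
#     arg_map = {"self": "at::rand({3, 4})", "other": "at::rand({3, 4})", "dim": "-1"}
#     override_test_values(arg_map, "linalg_cross", index=0)
#     # arg_map is now {"self": "at::rand({6, 3, 6})",
#     #                 "other": "at::rand({6, 3, 6})", "dim": "1"}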