import logging

import torch

from ..lowering import register_lowering
from ..select_algorithm import (
    autotune_select_algorithm,
    ExternKernelChoice,
    TritonTemplate,
)
from ..utils import use_triton_template
from .mm_common import addmm_epilogue, mm_args, mm_configs, mm_grid, mm_options

log = logging.getLogger(__name__)
aten = torch.ops.aten

mm_template = TritonTemplate(
    name="mm",
    grid=mm_grid,
    source=r"""
{{def_kernel("A", "B")}}
    M = {{size("A", 0)}}
    N = {{size("B", 1)}}
    K = {{size("A", 1)}}
    stride_am = {{stride("A", 0)}}
    stride_ak = {{stride("A", 1)}}
    stride_bk = {{stride("B", 0)}}
    stride_bn = {{stride("B", 1)}}

    # based on triton.ops.matmul
    pid = tl.program_id(0)
    grid_m = (M + BLOCK_M - 1) // BLOCK_M
    grid_n = (N + BLOCK_N - 1) // BLOCK_N

    # re-order program ID for better L2 performance
    width = GROUP_M * grid_n
    group_id = pid // width
    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
    pid_m = group_id * GROUP_M + (pid % group_size)
    pid_n = (pid % width) // (group_size)

    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
    rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
    rk = tl.arange(0, BLOCK_K)
    A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
    B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)

    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
    for k in range(K, 0, -BLOCK_K):
        if EVEN_K:
            a = tl.load(A)
            b = tl.load(B)
        else:
            a = tl.load(A, mask=rk[None, :] < k, other=0.)
            b = tl.load(B, mask=rk[:, None] < k, other=0.)
        acc += tl.dot(a, b, allow_tf32=ALLOW_TF32)
        A += BLOCK_K * stride_ak
        B += BLOCK_K * stride_bk

    # rematerialize rm and rn to save registers
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    idx_m = rm[:, None]
    idx_n = rn[None, :]
    mask = (idx_m < M) & (idx_n < N)

    # inductor generates a suffix
    {{store_output(("idx_m", "idx_n"), "acc", "mask")}}
""",
)
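
# Illustrative sketch (not part of the original lowering; the helper name is
# hypothetical): the GROUP_M re-ordering in the template above walks output
# tiles in groups of GROUP_M rows, so consecutive program IDs reuse the same
# B columns and hit L2 more often. The same index math in plain Python:
def _swizzled_tile_coords(pid, grid_m, grid_n, group_m):
    """Map a linear program id to (pid_m, pid_n) with GROUP_M swizzling."""
    width = group_m * grid_n
    group_id = pid // width
    # the last group may cover fewer than group_m rows of tiles
    group_size = min(grid_m - group_id * group_m, group_m)
    pid_m = group_id * group_m + (pid % group_size)
    pid_n = (pid % width) // group_size
    return pid_m, pid_n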

aten_mm = ExternKernelChoice(torch.mm, "at::mm_out")

aten_addmm = ExternKernelChoice(torch.addmm, "at::addmm_out")

def bias_addmm(inp, mat1, mat2, *, out=None, alpha=1, beta=1):
    """
    Giving torch.addmm a 1D tensor calls a different (faster) cublasLt
    kernel under the hood. There are a few shapes where this is slower,
    but they are rare.
    """
    if inp.stride(0) == 0 or inp.size(0) == 1:
        return torch.addmm(inp[0], mat1, mat2, out=out, alpha=alpha, beta=beta)
    return torch.addmm(inp, mat1, mat2, out=out, alpha=alpha, beta=beta)


aten_bias_addmm = ExternKernelChoice(bias_addmm, None)
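
# Minimal usage sketch (illustrative only; the function name is hypothetical
# and assumes CUDA float tensors): a broadcast bias is a view with
# stride(0) == 0, so bias_addmm collapses it back to 1D via inp[0] and lets
# the cublasLt epilogue fuse the bias add.
def _bias_addmm_example(m=64, n=128, k=32):
    mat1 = torch.randn(m, k, device="cuda")
    mat2 = torch.randn(k, n, device="cuda")
    bias = torch.randn(n, device="cuda")
    inp = bias.unsqueeze(0).expand(m, n)  # broadcast view: inp.stride(0) == 0
    return bias_addmm(inp, mat1, mat2)  # takes the 1D fast path above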

@register_lowering(aten.mm)
def tuned_mm(mat1, mat2, *, layout=None):
    m, n, k, layout, mat1, mat2 = mm_args(mat1, mat2, layout=layout)

    # options to tune from
    choices = [aten_mm.bind((mat1, mat2), layout)]
    if use_triton_template(layout):
        for config in mm_configs():
            choices.append(
                mm_template.generate(
                    (mat1, mat2),
                    layout,
                    **mm_options(config, k, layout),
                )
            )

    return autotune_select_algorithm(choices, [mat1, mat2], layout)
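
# End-to-end sketch (illustrative only, kept as a comment so this module has
# no import-time side effects): a 2D matmul in a compiled function lowers
# through aten.mm and hence through tuned_mm above; with mode="max-autotune",
# Inductor benchmarks the ATen extern choice against the Triton template
# configs and keeps the fastest one.
#
#     @torch.compile(mode="max-autotune")
#     def f(a, b):
#         return torch.mm(a, b)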

@register_lowering(aten.addmm)
def tuned_addmm(inp, mat1, mat2, *, alpha=1, beta=1, layout=None):
    m, n, k, layout, mat1, mat2, inp_expanded = mm_args(mat1, mat2, inp, layout=layout)
    if not use_triton_template(layout):
        choices = [aten_addmm.bind((inp, mat1, mat2), layout, alpha=alpha, beta=beta)]
        return autotune_select_algorithm(choices, [inp, mat1, mat2], layout)

    choices = [
        aten_addmm.bind((inp_expanded, mat1, mat2), layout, alpha=alpha, beta=beta)
    ]
    if inp_expanded.get_stride()[0] == 0 and inp_expanded.get_device().type == "cuda":
        # unexpand inp to make sure fused addmm from cublasLt is used
        choices.insert(
            0,
            aten_bias_addmm.bind(
                (inp_expanded, mat1, mat2), layout, alpha=alpha, beta=beta
            ),
        )
    for config in mm_configs():
        choices.append(
            mm_template.generate(
                (inp_expanded, mat1, mat2),
                layout,
                **mm_options(config, k, layout),
                prefix_args=1,
                epilogue_fn=addmm_epilogue(layout.dtype, alpha, beta),
            )
        )

    return autotune_select_algorithm(choices, [inp_expanded, mat1, mat2], layout)
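
# Note on the template path (a reading of the code above, hedged): the Triton
# kernel itself only produces acc = mat1 @ mat2; prefix_args=1 keeps
# inp_expanded out of the kernel body, and addmm_epilogue fuses the final
#     out = beta * inp + alpha * acc
# into the generated store, matching torch.addmm semantics.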