#pragma once

// @generated by torchgen/gen.py from Function.h

#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>

#include <ATen/ops/nll_loss_nd_ops.h>

namespace at {

// aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
inline at::Tensor nll_loss_nd(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) {
    return at::_ops::nll_loss_nd::call(self, target, weight, reduction, ignore_index);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor nll_loss_nd(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) {
    return at::_ops::nll_loss_nd::call(self, target, weight, reduction, ignore_index);
  }
}

// aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
inline at::Tensor nll_loss_nd_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100) {
    return at::_ops::nll_loss_nd::call(self, target, weight, reduction, ignore_index);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor nll_loss_nd(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100) {
    return at::_ops::nll_loss_nd::call(self, target, weight, reduction, ignore_index);
  }
}

}
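
// Minimal usage sketch for at::nll_loss_nd, assuming a caller translation unit
// that also includes <ATen/ATen.h>. The input is expected to hold
// log-probabilities of shape [N, C, ...] and the target integer class indices
// of shape [N, ...]; all tensor shapes below are illustrative.
//
//   at::Tensor input  = at::randn({4, 3, 8, 8});            // raw scores for 3 classes
//   at::Tensor logp   = at::log_softmax(input, /*dim=*/1);  // nll_loss expects log-probabilities
//   at::Tensor target = at::randint(0, 3, {4, 8, 8}, at::kLong);
//   at::Tensor loss   = at::nll_loss_nd(logp, target);      // defaults: weight = None,
//                                                           // reduction = Mean, ignore_index = -100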