#pragma once

// @generated by torchgen/gen.py from NativeFunction.h

#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>
#include <c10/core/QScheme.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <tuple>
#include <vector>

namespace at {
namespace native {

TORCH_API at::Tensor std(const at::Tensor & self, bool unbiased=true);
TORCH_API at::Tensor std(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased=true, bool keepdim=false);
TORCH_API at::Tensor & std_out(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out);
TORCH_API at::Tensor std(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<int64_t> correction=c10::nullopt, bool keepdim=false);
TORCH_API at::Tensor & std_out(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out);
TORCH_API at::Tensor std_quantized_cpu(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<int64_t> correction=c10::nullopt, bool keepdim=false);
TORCH_API at::Tensor & std_out_quantized_cpu(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out);
TORCH_API at::Tensor std(const at::Tensor & self, at::DimnameList dim, bool unbiased=true, bool keepdim=false);
TORCH_API at::Tensor & std_out(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out);
TORCH_API at::Tensor std(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction=c10::nullopt, bool keepdim=false);
TORCH_API at::Tensor & std_out(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out);

} // namespace native
} // namespace at
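
// Usage sketch (illustrative, not part of the generated output): the declarations above
// are the native implementations (including the QuantizedCPU variants) registered for the
// public at::std / at::std_out overloads. A minimal example of the corresponding public
// calls, assuming the standard ATen API; std_example is a hypothetical helper name:
//
//   #include <ATen/ATen.h>
//
//   void std_example() {
//     at::Tensor x = at::randn({4, 5});
//     // Whole-tensor standard deviation, unbiased (Bessel's correction) by default.
//     at::Tensor s_all = at::std(x);
//     // Reduce over dim 1 with the biased estimator, keeping the reduced dimension.
//     at::Tensor s_dim = at::std(x, /*dim=*/{1}, /*unbiased=*/false, /*keepdim=*/true);
//   }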