Program Listing for File ops.h
(Return to the documentation page for torch/csrc/stable/ops.h)
#pragma once
#include <torch/csrc/stable/stableivalue_conversions.h>
#include <array>
#include <cstdint>
#include <optional>
#include <string>
#include <torch/csrc/inductor/aoti_torch/generated/c_shim_aten.h>
#include <torch/csrc/stable/c/shim.h>
#include <torch/csrc/stable/version.h>
#include <torch/headeronly/core/ScalarType.h>
#include <torch/headeronly/macros/Macros.h>
#include <torch/headeronly/util/HeaderOnlyArrayRef.h>
HIDDEN_NAMESPACE_BEGIN(torch, stable)
// Stable ABI wrapper for aten::empty_like: returns a new, uninitialized
// tensor matching `self`. All five optional tensor-options arguments of the
// op (dtype, layout, device, pin_memory, memory_format) are left unset.
inline torch::stable::Tensor empty_like(const torch::stable::Tensor& self) {
  std::array<StableIValue, 6> ivals = {
      torch::stable::detail::from(self),
      torch::stable::detail::from(std::nullopt),
      torch::stable::detail::from(std::nullopt),
      torch::stable::detail::from(std::nullopt),
      torch::stable::detail::from(std::nullopt),
      torch::stable::detail::from(std::nullopt)};
#if TORCH_FEATURE_VERSION >= TORCH_VERSION_2_10_0
  TORCH_ERROR_CODE_CHECK(torch_call_dispatcher(
      "aten::empty_like", "", ivals.data(), TORCH_ABI_VERSION));
#else
  TORCH_ERROR_CODE_CHECK(
      aoti_torch_call_dispatcher("aten::empty_like", "", ivals.data()));
#endif
  // The boxed call leaves its single output in slot 0 of the stack.
  return torch::stable::detail::to<torch::stable::Tensor>(ivals[0]);
}
// Stable ABI wrapper for aten::fill_.Scalar: fills `self` in place with
// `value` via the generated C shim, then returns self by value (the return
// type is Tensor, so this returns a new wrapper around the same tensor).
inline torch::stable::Tensor fill_(
    const torch::stable::Tensor& self,
    double value) {
  TORCH_ERROR_CODE_CHECK(aoti_torch_aten_fill__Scalar(self.get(), value));
  return self;
}
// Stable ABI wrapper for aten::narrow: returns a tensor restricted to
// `length` elements along dimension `dim`, starting at `start`.
//
// `self` is now taken by const reference: narrow does not mutate its input,
// and every other read-only op in this header already takes const Tensor&.
// This is backward-compatible for callers (non-const lvalues still bind) and
// additionally allows temporaries to be narrowed.
inline torch::stable::Tensor narrow(
    const torch::stable::Tensor& self,
    int64_t dim,
    int64_t start,
    int64_t length) {
  AtenTensorHandle ret0 = nullptr;
  TORCH_ERROR_CODE_CHECK(
      aoti_torch_aten_narrow(self.get(), dim, start, length, &ret0));
  return torch::stable::Tensor(ret0);
}
#if TORCH_FEATURE_VERSION < TORCH_VERSION_2_10_0
// Pre-2.10 stable wrapper for aten::new_empty: allocates an uninitialized
// tensor of shape `size` on the same device as `self`, using `dtype` if
// given, otherwise `self`'s dtype. Layout and device are always inherited
// from `self`; pin_memory is left at its default.
inline torch::stable::Tensor new_empty(
    const torch::stable::Tensor& self,
    torch::headeronly::IntHeaderOnlyArrayRef size,
    std::optional<torch::headeronly::ScalarType> dtype = std::nullopt) {
  int32_t device_type;
  TORCH_ERROR_CODE_CHECK(aoti_torch_get_device_type(self.get(), &device_type));
  int32_t device_index;
  TORCH_ERROR_CODE_CHECK(
      aoti_torch_get_device_index(self.get(), &device_index));
  int32_t target_dtype;
  if (dtype.has_value()) {
    // Round-trip through StableIValue to translate the header-only
    // ScalarType into the shim's int32_t dtype encoding.
    target_dtype = torch::stable::detail::to<int32_t>(
        torch::stable::detail::from(dtype.value()));
  } else {
    TORCH_ERROR_CODE_CHECK(aoti_torch_get_dtype(self.get(), &target_dtype));
  }
  int32_t layout;
  TORCH_ERROR_CODE_CHECK(aoti_torch_get_layout(self.get(), &layout));
  // Initialize the out-handle to nullptr for consistency with the other
  // shim-based wrappers in this header (narrow, pad, amax, ...).
  AtenTensorHandle ret0 = nullptr;
  TORCH_ERROR_CODE_CHECK(aoti_torch_aten_new_empty(
      self.get(),
      size.data(),
      static_cast<int64_t>(size.size()),
      &target_dtype,
      &layout,
      &device_type,
      device_index,
      nullptr, // pin_memory (nullptr for default)
      &ret0));
  return torch::stable::Tensor(ret0);
}
// Pre-2.10 stable wrapper for aten::new_zeros: allocates a zero-filled
// tensor of shape `size` on the same device as `self`, using `dtype` if
// given, otherwise `self`'s dtype. Layout and device are always inherited
// from `self`; pin_memory is left at its default.
inline torch::stable::Tensor new_zeros(
    const torch::stable::Tensor& self,
    torch::headeronly::IntHeaderOnlyArrayRef size,
    std::optional<torch::headeronly::ScalarType> dtype = std::nullopt) {
  int32_t device_type;
  TORCH_ERROR_CODE_CHECK(aoti_torch_get_device_type(self.get(), &device_type));
  int32_t device_index;
  TORCH_ERROR_CODE_CHECK(
      aoti_torch_get_device_index(self.get(), &device_index));
  int32_t target_dtype;
  if (dtype.has_value()) {
    // Round-trip through StableIValue to translate the header-only
    // ScalarType into the shim's int32_t dtype encoding.
    target_dtype = torch::stable::detail::to<int32_t>(
        torch::stable::detail::from(dtype.value()));
  } else {
    TORCH_ERROR_CODE_CHECK(aoti_torch_get_dtype(self.get(), &target_dtype));
  }
  int32_t layout;
  TORCH_ERROR_CODE_CHECK(aoti_torch_get_layout(self.get(), &layout));
  // Initialize the out-handle to nullptr for consistency with the other
  // shim-based wrappers in this header (narrow, pad, amax, ...).
  AtenTensorHandle ath = nullptr;
  TORCH_ERROR_CODE_CHECK(aoti_torch_aten_new_zeros(
      self.get(),
      size.data(),
      static_cast<int64_t>(size.size()),
      &target_dtype,
      &layout,
      &device_type,
      device_index,
      nullptr, // pin_memory (nullptr for default)
      &ath));
  return torch::stable::Tensor(ath);
}
#endif // TORCH_FEATURE_VERSION < TORCH_VERSION_2_10_0
// Stable ABI wrapper for aten::pad: pads `self` according to the flat
// (per-dimension begin/end) `pad` list, with the given `mode` and, for
// constant mode, the given fill `value`.
inline torch::stable::Tensor pad(
    const torch::stable::Tensor& self,
    torch::headeronly::IntHeaderOnlyArrayRef pad,
    const std::string& mode = "constant",
    double value = 0.0) {
  AtenTensorHandle result = nullptr;
  TORCH_ERROR_CODE_CHECK(aoti_torch_aten_pad(
      self.get(), pad.data(), pad.size(), mode.c_str(), &value, &result));
  return torch::stable::Tensor(result);
}
// Stable ABI wrapper for aten::amax over a single dimension: reduces `self`
// along `dim`, keeping the reduced dimension if `keepdim` is true.
inline torch::stable::Tensor amax(
    const torch::stable::Tensor& self,
    int64_t dim,
    bool keepdim = false) {
  AtenTensorHandle result = nullptr;
  // The shim takes a dims array; pass the single dim as a length-1 array.
  TORCH_ERROR_CODE_CHECK(
      aoti_torch_aten_amax(self.get(), &dim, 1, keepdim, &result));
  return torch::stable::Tensor(result);
}
// Stable ABI wrapper for aten::amax over multiple dimensions: reduces `self`
// along every dimension in `dims`, keeping them if `keepdim` is true.
inline torch::stable::Tensor amax(
    const torch::stable::Tensor& self,
    torch::headeronly::IntHeaderOnlyArrayRef dims,
    bool keepdim = false) {
  AtenTensorHandle result = nullptr;
  const auto ndims = static_cast<int64_t>(dims.size());
  TORCH_ERROR_CODE_CHECK(
      aoti_torch_aten_amax(self.get(), dims.data(), ndims, keepdim, &result));
  return torch::stable::Tensor(result);
}
// Stable ABI wrapper for aten::transpose.int: swaps dimensions `dim0` and
// `dim1` of `self`.
inline torch::stable::Tensor transpose(
    const torch::stable::Tensor& self,
    int64_t dim0,
    int64_t dim1) {
  std::array<StableIValue, 3> ivals = {
      torch::stable::detail::from(self),
      torch::stable::detail::from(dim0),
      torch::stable::detail::from(dim1)};
#if TORCH_FEATURE_VERSION >= TORCH_VERSION_2_10_0
  TORCH_ERROR_CODE_CHECK(torch_call_dispatcher(
      "aten::transpose", "int", ivals.data(), TORCH_ABI_VERSION));
#else
  TORCH_ERROR_CODE_CHECK(
      aoti_torch_call_dispatcher("aten::transpose", "int", ivals.data()));
#endif
  // The boxed call leaves its single output in slot 0 of the stack.
  return torch::stable::detail::to<torch::stable::Tensor>(ivals[0]);
}
// Stable ABI wrapper for aten::zero_: zeroes `self` in place and returns it.
inline torch::stable::Tensor zero_(torch::stable::Tensor& self) {
  std::array<StableIValue, 1> ivals = {torch::stable::detail::from(self)};
#if TORCH_FEATURE_VERSION >= TORCH_VERSION_2_10_0
  TORCH_ERROR_CODE_CHECK(torch_call_dispatcher(
      "aten::zero_", "", ivals.data(), TORCH_ABI_VERSION));
#else
  TORCH_ERROR_CODE_CHECK(
      aoti_torch_call_dispatcher("aten::zero_", "", ivals.data()));
#endif
  // The boxed call leaves its single output in slot 0 of the stack.
  return torch::stable::detail::to<torch::stable::Tensor>(ivals[0]);
}
// Stable ABI wrapper for aten::copy_: copies `src` into `self` in place and
// returns self. `non_blocking` defaults to false when unset.
inline torch::stable::Tensor copy_(
    torch::stable::Tensor& self,
    const torch::stable::Tensor& src,
    std::optional<bool> non_blocking = std::nullopt) {
  const bool async_copy = non_blocking.value_or(false);
  std::array<StableIValue, 3> ivals = {
      torch::stable::detail::from(self),
      torch::stable::detail::from(src),
      torch::stable::detail::from(async_copy)};
#if TORCH_FEATURE_VERSION >= TORCH_VERSION_2_10_0
  TORCH_ERROR_CODE_CHECK(torch_call_dispatcher(
      "aten::copy_", "", ivals.data(), TORCH_ABI_VERSION));
#else
  TORCH_ERROR_CODE_CHECK(
      aoti_torch_call_dispatcher("aten::copy_", "", ivals.data()));
#endif
  // The boxed call leaves its single output in slot 0 of the stack.
  return torch::stable::detail::to<torch::stable::Tensor>(ivals[0]);
}
// Stable ABI wrapper for aten::clone: returns a copy of `self` (the
// optional memory_format argument of the op is left unset).
inline torch::stable::Tensor clone(const torch::stable::Tensor& self) {
  std::array<StableIValue, 2> ivals = {
      torch::stable::detail::from(self),
      torch::stable::detail::from(std::nullopt)};
#if TORCH_FEATURE_VERSION >= TORCH_VERSION_2_10_0
  TORCH_ERROR_CODE_CHECK(torch_call_dispatcher(
      "aten::clone", "", ivals.data(), TORCH_ABI_VERSION));
#else
  TORCH_ERROR_CODE_CHECK(
      aoti_torch_call_dispatcher("aten::clone", "", ivals.data()));
#endif
  // The boxed call leaves its single output in slot 0 of the stack.
  return torch::stable::detail::to<torch::stable::Tensor>(ivals[0]);
}
// Stable ABI wrapper for aten::flatten.using_ints: flattens the dimensions
// of `self` from `start_dim` through `end_dim` (inclusive, defaults to all).
inline torch::stable::Tensor flatten(
    const torch::stable::Tensor& self,
    int64_t start_dim = 0,
    int64_t end_dim = -1) {
  std::array<StableIValue, 3> ivals = {
      torch::stable::detail::from(self),
      torch::stable::detail::from(start_dim),
      torch::stable::detail::from(end_dim)};
#if TORCH_FEATURE_VERSION >= TORCH_VERSION_2_10_0
  TORCH_ERROR_CODE_CHECK(torch_call_dispatcher(
      "aten::flatten", "using_ints", ivals.data(), TORCH_ABI_VERSION));
#else
  TORCH_ERROR_CODE_CHECK(
      aoti_torch_call_dispatcher("aten::flatten", "using_ints", ivals.data()));
#endif
  // The boxed call leaves its single output in slot 0 of the stack.
  return torch::stable::detail::to<torch::stable::Tensor>(ivals[0]);
}
// Stable ABI wrapper for aten::unsqueeze: inserts a size-1 dimension at
// position `dim`.
inline torch::stable::Tensor unsqueeze(
    const torch::stable::Tensor& self,
    int64_t dim) {
  std::array<StableIValue, 2> ivals = {
      torch::stable::detail::from(self), torch::stable::detail::from(dim)};
#if TORCH_FEATURE_VERSION >= TORCH_VERSION_2_10_0
  TORCH_ERROR_CODE_CHECK(torch_call_dispatcher(
      "aten::unsqueeze", "", ivals.data(), TORCH_ABI_VERSION));
#else
  TORCH_ERROR_CODE_CHECK(
      aoti_torch_call_dispatcher("aten::unsqueeze", "", ivals.data()));
#endif
  // The boxed call leaves its single output in slot 0 of the stack.
  return torch::stable::detail::to<torch::stable::Tensor>(ivals[0]);
}
// Stable ABI wrapper for aten::squeeze.dim: removes dimension `dim` of
// `self` if it has size 1.
inline torch::stable::Tensor squeeze(
    const torch::stable::Tensor& self,
    int64_t dim) {
  std::array<StableIValue, 2> ivals = {
      torch::stable::detail::from(self), torch::stable::detail::from(dim)};
#if TORCH_FEATURE_VERSION >= TORCH_VERSION_2_10_0
  TORCH_ERROR_CODE_CHECK(torch_call_dispatcher(
      "aten::squeeze", "dim", ivals.data(), TORCH_ABI_VERSION));
#else
  TORCH_ERROR_CODE_CHECK(
      aoti_torch_call_dispatcher("aten::squeeze", "dim", ivals.data()));
#endif
  // The boxed call leaves its single output in slot 0 of the stack.
  return torch::stable::detail::to<torch::stable::Tensor>(ivals[0]);
}
// Stable ABI wrapper for aten::select.int: selects index `index` along
// dimension `dim` of `self`.
inline torch::stable::Tensor select(
    const torch::stable::Tensor& self,
    int64_t dim,
    int64_t index) {
  std::array<StableIValue, 3> ivals = {
      torch::stable::detail::from(self),
      torch::stable::detail::from(dim),
      torch::stable::detail::from(index)};
#if TORCH_FEATURE_VERSION >= TORCH_VERSION_2_10_0
  TORCH_ERROR_CODE_CHECK(torch_call_dispatcher(
      "aten::select", "int", ivals.data(), TORCH_ABI_VERSION));
#else
  TORCH_ERROR_CODE_CHECK(
      aoti_torch_call_dispatcher("aten::select", "int", ivals.data()));
#endif
  // The boxed call leaves its single output in slot 0 of the stack.
  return torch::stable::detail::to<torch::stable::Tensor>(ivals[0]);
}
// Stable ABI wrapper for aten::matmul: matrix product of `self` and `other`.
inline torch::stable::Tensor matmul(
    const torch::stable::Tensor& self,
    const torch::stable::Tensor& other) {
  std::array<StableIValue, 2> ivals = {
      torch::stable::detail::from(self), torch::stable::detail::from(other)};
#if TORCH_FEATURE_VERSION >= TORCH_VERSION_2_10_0
  TORCH_ERROR_CODE_CHECK(torch_call_dispatcher(
      "aten::matmul", "", ivals.data(), TORCH_ABI_VERSION));
#else
  TORCH_ERROR_CODE_CHECK(
      aoti_torch_call_dispatcher("aten::matmul", "", ivals.data()));
#endif
  // The boxed call leaves its single output in slot 0 of the stack.
  return torch::stable::detail::to<torch::stable::Tensor>(ivals[0]);
}
#if TORCH_FEATURE_VERSION >= TORCH_VERSION_2_10_0
// Runs `f(chunk_begin, chunk_end)` over sub-ranges of [begin, end) through
// the stable torch_parallel_for entry point, splitting work according to
// `grain_size`. The typed functor is passed to the C callback via its void*
// context pointer.
template <class F>
inline void parallel_for(
    const int64_t begin,
    const int64_t end,
    const int64_t grain_size,
    const F& f) {
  // Trampoline: recover the typed functor from the context pointer and
  // invoke it on the sub-range handed to this chunk.
  auto trampoline = [](int64_t chunk_begin, int64_t chunk_end, void* ctx) {
    (*static_cast<const F*>(ctx))(chunk_begin, chunk_end);
  };
  // const_cast is required because the C API takes a non-const void*; the
  // trampoline restores constness before calling.
  void* ctx = const_cast<void*>(static_cast<const void*>(&f));
  TORCH_ERROR_CODE_CHECK(
      torch_parallel_for(begin, end, grain_size, trampoline, ctx));
}
// Returns the number of intra-op threads reported by the stable
// torch_get_num_threads entry point.
inline uint32_t get_num_threads() {
  uint32_t n;
  TORCH_ERROR_CODE_CHECK(torch_get_num_threads(&n));
  return n;
}
// Stable ABI wrapper for aten::empty.memory_format: allocates an
// uninitialized tensor of shape `size`; each unset optional falls back to
// the op's defaults.
inline torch::stable::Tensor empty(
    torch::headeronly::IntHeaderOnlyArrayRef size,
    std::optional<torch::headeronly::ScalarType> dtype = std::nullopt,
    std::optional<torch::headeronly::Layout> layout = std::nullopt,
    std::optional<torch::stable::Device> device = std::nullopt,
    std::optional<bool> pin_memory = std::nullopt,
    std::optional<torch::headeronly::MemoryFormat> memory_format =
        std::nullopt) {
  std::array<StableIValue, 6> ivals = {
      torch::stable::detail::from(size),
      torch::stable::detail::from(dtype),
      torch::stable::detail::from(layout),
      torch::stable::detail::from(device),
      torch::stable::detail::from(pin_memory),
      torch::stable::detail::from(memory_format)};
  TORCH_ERROR_CODE_CHECK(torch_call_dispatcher(
      "aten::empty", "memory_format", ivals.data(), TORCH_ABI_VERSION));
  // The boxed call leaves its single output in slot 0 of the stack.
  return torch::stable::detail::to<torch::stable::Tensor>(ivals[0]);
}
// Stable ABI wrapper for aten::reshape: returns a tensor with the same data
// as `self` and the given `shape`.
inline torch::stable::Tensor reshape(
    const torch::stable::Tensor& self,
    torch::headeronly::IntHeaderOnlyArrayRef shape) {
  std::array<StableIValue, 2> ivals = {
      torch::stable::detail::from(self), torch::stable::detail::from(shape)};
  TORCH_ERROR_CODE_CHECK(torch_call_dispatcher(
      "aten::reshape", "", ivals.data(), TORCH_ABI_VERSION));
  // The boxed call leaves its single output in slot 0 of the stack.
  return torch::stable::detail::to<torch::stable::Tensor>(ivals[0]);
}
// Stable ABI wrapper for aten::view: returns a view of `self` with the
// given `size`.
inline torch::stable::Tensor view(
    const torch::stable::Tensor& self,
    torch::headeronly::IntHeaderOnlyArrayRef size) {
  std::array<StableIValue, 2> ivals = {
      torch::stable::detail::from(self), torch::stable::detail::from(size)};
  TORCH_ERROR_CODE_CHECK(torch_call_dispatcher(
      "aten::view", "", ivals.data(), TORCH_ABI_VERSION));
  // The boxed call leaves its single output in slot 0 of the stack.
  return torch::stable::detail::to<torch::stable::Tensor>(ivals[0]);
}
// Wraps caller-owned memory `data` as a tensor with the given sizes,
// strides, device, dtype, storage offset, and layout, via
// aoti_torch_create_tensor_from_blob_v2. The tensor does not take ownership
// of `data`; the caller must keep the buffer alive for the tensor's
// lifetime (no deleter is registered — the deleter-related shim arguments
// are passed as nullptr/0).
inline torch::stable::Tensor from_blob(
    void* data,
    torch::headeronly::IntHeaderOnlyArrayRef sizes,
    torch::headeronly::IntHeaderOnlyArrayRef strides,
    torch::stable::Device device,
    torch::headeronly::ScalarType dtype,
    int64_t storage_offset = 0,
    torch::headeronly::Layout layout = torch::headeronly::Layout::Strided) {
  // Round-trip through StableIValue to translate the header-only enums into
  // the shim's int32_t encodings.
  auto shim_dtype =
      torch::stable::detail::to<int32_t>(torch::stable::detail::from(dtype));
  auto shim_device_type = torch::stable::detail::to<int32_t>(
      torch::stable::detail::from(device.type()));
  auto shim_layout =
      torch::stable::detail::to<int32_t>(torch::stable::detail::from(layout));
  // Initialize the out-handle to nullptr for consistency with the other
  // shim-based wrappers in this header.
  AtenTensorHandle ath = nullptr;
  TORCH_ERROR_CODE_CHECK(aoti_torch_create_tensor_from_blob_v2(
      data,
      sizes.size(),
      sizes.data(),
      strides.data(),
      storage_offset,
      shim_dtype,
      shim_device_type,
      device.index(),
      &ath,
      shim_layout,
      nullptr,
      0));
  return torch::stable::Tensor(ath);
}
// Stable ABI wrapper for aten::to.dtype_layout: converts `self` to the
// requested dtype/layout/device options; unset optionals keep `self`'s
// current values.
inline torch::stable::Tensor to(
    const torch::stable::Tensor& self,
    std::optional<torch::headeronly::ScalarType> dtype = std::nullopt,
    std::optional<torch::headeronly::Layout> layout = std::nullopt,
    std::optional<torch::stable::Device> device = std::nullopt,
    std::optional<bool> pin_memory = std::nullopt,
    bool non_blocking = false,
    bool copy = false,
    std::optional<torch::headeronly::MemoryFormat> memory_format =
        std::nullopt) {
  std::array<StableIValue, 8> ivals = {
      torch::stable::detail::from(self),
      torch::stable::detail::from(dtype),
      torch::stable::detail::from(layout),
      torch::stable::detail::from(device),
      torch::stable::detail::from(pin_memory),
      torch::stable::detail::from(non_blocking),
      torch::stable::detail::from(copy),
      torch::stable::detail::from(memory_format)};
  TORCH_ERROR_CODE_CHECK(torch_call_dispatcher(
      "aten::to", "dtype_layout", ivals.data(), TORCH_ABI_VERSION));
  // The boxed call leaves its single output in slot 0 of the stack.
  return torch::stable::detail::to<torch::stable::Tensor>(ivals[0]);
}
// Convenience overload: moves `self` to `device`, delegating to the full
// dtype_layout overload with every other optional left unset.
inline torch::stable::Tensor to(
    const torch::stable::Tensor& self,
    torch::stable::Device device,
    bool non_blocking = false,
    bool copy = false) {
  constexpr auto unset = std::nullopt;
  return to(self, unset, unset, device, unset, non_blocking, copy, unset);
}
// Stable ABI wrapper for aten::contiguous: returns a tensor with the given
// memory format (contiguous by default).
inline torch::stable::Tensor contiguous(
    const torch::stable::Tensor& self,
    torch::headeronly::MemoryFormat memory_format =
        torch::headeronly::MemoryFormat::Contiguous) {
  std::array<StableIValue, 2> ivals = {
      torch::stable::detail::from(self),
      torch::stable::detail::from(memory_format)};
  TORCH_ERROR_CODE_CHECK(torch_call_dispatcher(
      "aten::contiguous", "", ivals.data(), TORCH_ABI_VERSION));
  // The boxed call leaves its single output in slot 0 of the stack.
  return torch::stable::detail::to<torch::stable::Tensor>(ivals[0]);
}
// Stable ABI wrapper for aten::new_empty (2.10+ dispatcher path): allocates
// an uninitialized tensor of shape `size`; unset optionals inherit from
// `self` per the op's defaults.
inline torch::stable::Tensor new_empty(
    const torch::stable::Tensor& self,
    torch::headeronly::IntHeaderOnlyArrayRef size,
    std::optional<torch::headeronly::ScalarType> dtype = std::nullopt,
    std::optional<torch::headeronly::Layout> layout = std::nullopt,
    std::optional<torch::stable::Device> device = std::nullopt,
    std::optional<bool> pin_memory = std::nullopt) {
  std::array<StableIValue, 6> ivals = {
      torch::stable::detail::from(self),
      torch::stable::detail::from(size),
      torch::stable::detail::from(dtype),
      torch::stable::detail::from(layout),
      torch::stable::detail::from(device),
      torch::stable::detail::from(pin_memory)};
  TORCH_ERROR_CODE_CHECK(torch_call_dispatcher(
      "aten::new_empty", "", ivals.data(), TORCH_ABI_VERSION));
  // The boxed call leaves its single output in slot 0 of the stack.
  return torch::stable::detail::to<torch::stable::Tensor>(ivals[0]);
}
// Stable ABI wrapper for aten::new_zeros (2.10+ dispatcher path): allocates
// a zero-filled tensor of shape `size`; unset optionals inherit from `self`
// per the op's defaults.
inline torch::stable::Tensor new_zeros(
    const torch::stable::Tensor& self,
    torch::headeronly::IntHeaderOnlyArrayRef size,
    std::optional<torch::headeronly::ScalarType> dtype = std::nullopt,
    std::optional<torch::headeronly::Layout> layout = std::nullopt,
    std::optional<torch::stable::Device> device = std::nullopt,
    std::optional<bool> pin_memory = std::nullopt) {
  std::array<StableIValue, 6> ivals = {
      torch::stable::detail::from(self),
      torch::stable::detail::from(size),
      torch::stable::detail::from(dtype),
      torch::stable::detail::from(layout),
      torch::stable::detail::from(device),
      torch::stable::detail::from(pin_memory)};
  TORCH_ERROR_CODE_CHECK(torch_call_dispatcher(
      "aten::new_zeros", "", ivals.data(), TORCH_ABI_VERSION));
  // The boxed call leaves its single output in slot 0 of the stack.
  return torch::stable::detail::to<torch::stable::Tensor>(ivals[0]);
}
// Stable ABI wrapper for aten::sum.dim_IntList: sums `self` over `dim`
// (all dimensions when unset), optionally keeping reduced dims and casting
// to `dtype`.
inline torch::stable::Tensor sum(
    const torch::stable::Tensor& self,
    std::optional<torch::headeronly::IntHeaderOnlyArrayRef> dim = std::nullopt,
    bool keepdim = false,
    std::optional<torch::headeronly::ScalarType> dtype = std::nullopt) {
  std::array<StableIValue, 4> ivals = {
      torch::stable::detail::from(self),
      torch::stable::detail::from(dim),
      torch::stable::detail::from(keepdim),
      torch::stable::detail::from(dtype)};
  TORCH_ERROR_CODE_CHECK(torch_call_dispatcher(
      "aten::sum", "dim_IntList", ivals.data(), TORCH_ABI_VERSION));
  // The boxed call leaves its single output in slot 0 of the stack.
  return torch::stable::detail::to<torch::stable::Tensor>(ivals[0]);
}
// Stable ABI wrapper for aten::sum.IntList_out: sums `self` into the
// caller-provided `out` tensor and returns a reference to `out`.
inline torch::stable::Tensor& sum_out(
    torch::stable::Tensor& out,
    const torch::stable::Tensor& self,
    std::optional<torch::headeronly::IntHeaderOnlyArrayRef> dim = std::nullopt,
    bool keepdim = false,
    std::optional<torch::headeronly::ScalarType> dtype = std::nullopt) {
  std::array<StableIValue, 5> ivals = {
      torch::stable::detail::from(self),
      torch::stable::detail::from(dim),
      torch::stable::detail::from(keepdim),
      torch::stable::detail::from(dtype),
      torch::stable::detail::from(out)};
  TORCH_ERROR_CODE_CHECK(torch_call_dispatcher(
      "aten::sum", "IntList_out", ivals.data(), TORCH_ABI_VERSION));
  // The dispatcher still leaves an owning result in slot 0; materialize it
  // as a temporary Tensor so its handle is released, then return `out`.
  (void)torch::stable::detail::to<torch::stable::Tensor>(ivals[0]);
  return out;
}
// Stable ABI wrapper for aten::subtract.Tensor: computes
// `self - alpha * other` via the generated C shim.
inline torch::stable::Tensor subtract(
    const torch::stable::Tensor& self,
    const torch::stable::Tensor& other,
    double alpha = 1.0) {
  // Initialize the out-handle to nullptr for consistency with the other
  // shim-based wrappers in this header (narrow, pad, amax, ...).
  AtenTensorHandle ret0 = nullptr;
  TORCH_ERROR_CODE_CHECK(
      aoti_torch_aten_subtract_Tensor(self.get(), other.get(), alpha, &ret0));
  return torch::stable::Tensor(ret0);
}
// Stable ABI wrapper for aten::full: creates a tensor of shape `size`
// filled with `fill_value`. Each optional tensor-options argument is
// translated to the shim's nullable-pointer convention: nullptr means "use
// the op's default", otherwise the pointer refers to a stack-local holding
// the shim's int32_t encoding of the value.
inline torch::stable::Tensor full(
    torch::headeronly::IntHeaderOnlyArrayRef size,
    double fill_value,
    std::optional<torch::headeronly::ScalarType> dtype = std::nullopt,
    std::optional<torch::headeronly::Layout> layout = std::nullopt,
    std::optional<torch::stable::Device> device = std::nullopt,
    std::optional<bool> pin_memory = std::nullopt) {
  int32_t* dtype_ptr = nullptr;
  int32_t dtype_val;
  if (dtype.has_value()) {
    dtype_val = torch::stable::detail::to<int32_t>(
        torch::stable::detail::from(dtype.value()));
    dtype_ptr = &dtype_val;
  }
  int32_t* layout_ptr = nullptr;
  int32_t layout_val;
  if (layout.has_value()) {
    layout_val = torch::stable::detail::to<int32_t>(
        torch::stable::detail::from(layout.value()));
    layout_ptr = &layout_val;
  }
  int32_t* device_type_ptr = nullptr;
  int32_t device_type_val;
  // device_index is passed by value, so it needs a defined default (0).
  int32_t device_index = 0;
  if (device.has_value()) {
    device_type_val = torch::stable::detail::to<int32_t>(
        torch::stable::detail::from(device.value().type()));
    device_type_ptr = &device_type_val;
    device_index = device.value().index();
  }
  int32_t* pin_memory_ptr = nullptr;
  int32_t pin_memory_val;
  if (pin_memory.has_value()) {
    pin_memory_val = pin_memory.value() ? 1 : 0;
    pin_memory_ptr = &pin_memory_val;
  }
  // Initialize the out-handle to nullptr for consistency with the other
  // shim-based wrappers in this header (narrow, pad, amax, ...).
  AtenTensorHandle ret0 = nullptr;
  TORCH_ERROR_CODE_CHECK(aoti_torch_aten_full(
      size.data(),
      static_cast<int64_t>(size.size()),
      fill_value,
      dtype_ptr,
      layout_ptr,
      device_type_ptr,
      device_index,
      pin_memory_ptr,
      &ret0));
  return torch::stable::Tensor(ret0);
}
#endif // TORCH_FEATURE_VERSION >= TORCH_VERSION_2_10_0
HIDDEN_NAMESPACE_END(torch, stable)