# See README.md in this directory for more guidance
# *********NB: _cast_* operators are DEPRECATED and will be removed
# eventually. These were previously used before TorchScript IR supported
# representing ScalarType's. They are now superseded by usage of
# `aten::to()`. The ops remain here for backward compatibility purposes.
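#
# For illustration (a hedged sketch, not part of the schema), assuming
# `import torch`, the `aten::to()`-based replacement for e.g. `_cast_Byte` is:
#
#   t = torch.randn(4)
#   u = t.to(torch.uint8, non_blocking=False)  # instead of torch._cast_Byte(t)
#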
# DEPRECATED. DO NOT USE
- func: _cast_Byte(Tensor self, bool non_blocking=False) -> Tensor
variants: function
# DEPRECATED. DO NOT USE
- func: _cast_Char(Tensor self, bool non_blocking=False) -> Tensor
variants: function
# DEPRECATED. DO NOT USE
- func: _cast_Double(Tensor self, bool non_blocking=False) -> Tensor
variants: function
# DEPRECATED. DO NOT USE
- func: _cast_Float(Tensor self, bool non_blocking=False) -> Tensor
variants: function
# DEPRECATED. DO NOT USE
- func: _cast_Int(Tensor self, bool non_blocking=False) -> Tensor
variants: function
# DEPRECATED. DO NOT USE
- func: _cast_Long(Tensor self, bool non_blocking=False) -> Tensor
variants: function
# DEPRECATED. DO NOT USE
- func: _cast_Short(Tensor self, bool non_blocking=False) -> Tensor
variants: function
# DEPRECATED. DO NOT USE
- func: _cast_Half(Tensor self, bool non_blocking=False) -> Tensor
variants: function
# Computes the gradient of current tensor w.r.t. graph leaves.
- func: _backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()
manual_cpp_binding: True
variants: method
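# For illustration (a hedged sketch of the user-facing autograd call whose
# semantics `_backward` describes; the exact dispatch path is an implementation
# detail), assuming `import torch`:
#
#   x = torch.randn(3, requires_grad=True)
#   y = (x * x).sum()
#   y.backward()               # populates x.grad with dy/dx == 2 * x
#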
# DEPRECATED. Sets the tensor data held by this `Variable` to be the same as
# `new_data`. It requires that `new_data` and `Variable` have compatible tensor
# type, by checking `_has_compatible_shallow_copy_type(this, new_data)`.
#
# This function is deprecated because it doesn't really make sense in a world
# where Variables *are* Tensors (as opposed to containing tensors, which was
# the previous interpretation).
- func: set_data(Tensor(a!) self, Tensor new_data) -> ()
manual_cpp_binding: True
variants: method
- func: data(Tensor self) -> Tensor
manual_cpp_binding: True
variants: method
# True if this `Variable` is a leaf and thus does not have a `grad_fn`.
- func: is_leaf(Tensor self) -> bool
manual_cpp_binding: True
variants: method
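# For illustration of `is_leaf` (a hedged sketch), assuming `import torch`:
#
#   a = torch.randn(2, requires_grad=True)   # created by the user -> leaf
#   b = a * 2                                # produced by an op -> has a grad_fn
#   assert a.is_leaf and not b.is_leaf
#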
# Returns the output index of this variable from the forward operation that
# produced it. Conversely, it returns the input index of the gradient `Node` to
# which this `Variable` is connected (because in the gradient computation,
# inputs and outputs switch meaning). For example:
#
# y0, y1, y2 = f(x)
# assert y0.output_nr == 0
# assert y1.output_nr == 1
# assert y2.output_nr == 2
#
- func: output_nr(Tensor self) -> int
manual_cpp_binding: True
variants: method
- func: _version(Tensor self) -> int
manual_cpp_binding: True
variants: method
- func: requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)
manual_cpp_binding: True
variants: method
# Enables .grad attribute for non-leaf Tensors.
- func: retain_grad(Tensor(a!) self) -> ()
manual_cpp_binding: True
variants: method
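# For illustration of `retain_grad` (a hedged sketch), assuming `import torch`:
#
#   x = torch.randn(2, requires_grad=True)
#   y = x * 2                  # non-leaf; .grad would normally not be populated
#   y.retain_grad()
#   y.sum().backward()
#   assert y.grad is not None
#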
- func: retains_grad(Tensor self) -> bool
manual_cpp_binding: True
variants: method
- func: _fw_primal(Tensor(a) self, int level) -> Tensor(a)
variants: method
dispatch:
CompositeExplicitAutograd: _fw_primal
- func: _make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)
variants: function
dispatch:
CompositeExplicitAutograd: _make_dual
- func: _unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)
variants: function
# NOTE: [_new_zeros_with_same_feature_meta]
# This function creates a new tensor with the layout and TensorOptions
# of `other` but also takes into account the batch dimensions of `self`
#
# This function has a couple extra constraints because it is also used for `jvp`
# in functorch.
# - It is used for forward AD, which requires that the primal and tangent
# have the same layout.
# - We cannot assume that `self` and `other` have the same sizes or even the
# same number of dims, because in the in-place-over-view case `other` is the
# base tensor and `self` is the forward grad with respect to the view, which
# can have an entirely different shape.
# - It takes the number of batch dims for `self` because we also handle some
# batching logic here. We handle it here instead of in a batching rule because
# we'd like to avoid calling as_strided in the batching rule (so as to enable
# nested vmap in functorch).
# - It needs to be CompositeExplicitAutograd for jvp support in functorch.
# functorch currently relies on TensorWrapper, which does not have storage;
# CompositeExplicitAutograd makes sure the TensorWrapper is unwrapped.
# - This function may eventually take another int argument to store the
# number of batch dims for `other` once we support that use case.
- func: _new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: _new_zeros_with_same_feature_meta
autogen: _new_zeros_with_same_feature_meta.out
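# For illustration (a hedged sketch; strides and storage size are
# implementation details), assuming `import torch`:
#
#   primal  = torch.randn(3, 4)
#   tangent = torch.randn(3, 4)
#   z = torch.ops.aten._new_zeros_with_same_feature_meta(tangent, primal)
#   assert z.shape == primal.shape and bool((z == 0).all())
#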
# This function compares the storage numel of self with that of other, where
# storage numel is computed as: `other.storage().nbytes() / other.itemsize()`.
# We create this function for composite compliance purposes. The batching rule
# always returns true because vmapped as_strided does not support accessing
# storage locations not indexable by the input tensor.
# See the note above for more information.
- func: _has_same_storage_numel(Tensor self, Tensor other) -> bool
variants: function
dispatch:
CompositeExplicitAutograd: _has_same_storage_numel
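# For illustration of `_has_same_storage_numel` (a hedged sketch), assuming
# `import torch`:
#
#   base = torch.randn(6)
#   view = base[:3]                  # view; shares the full 6-element storage
#   torch.ops.aten._has_same_storage_numel(base, view)            # expected: True
#   torch.ops.aten._has_same_storage_numel(base, torch.randn(3))  # expected: False
#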
- func: rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
variants: method
tags: inplace_view
- func: rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)
variants: method
- func: align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)
variants: method
- func: align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)
variants: method
- func: align_as(Tensor self, Tensor other) -> Tensor
variants: method
- func: align_tensors(Tensor[] tensors) -> Tensor[]
# Not assert because it's a keyword; not Assert because FX already
# took that syntax
# TODO: need to specify this is side-effectful somehow
- func: _assert_async(Tensor self) -> ()
dispatch:
CPU: _assert_async_cpu
CUDA: _assert_async_cuda
- func: _assert_async.msg(Tensor self, str assert_msg) -> ()
dispatch:
CPU: _assert_async_msg_cpu
CUDA: _assert_async_msg_cuda
- func: _assert_scalar(Scalar self, str assert_msg) -> ()
dispatch:
CompositeExplicitAutograd: _assert_scalar
- func: _functional_assert_scalar(Scalar self, str assert_msg, Tensor dep_token) -> Tensor
dispatch:
CompositeExplicitAutograd: _functional_assert_scalar
- func: _functional_assert_async.msg(Tensor self, str assert_msg, Tensor dep_token) -> Tensor
dispatch:
CPU: _functional_assert_async_msg_cpu
- func: _assert_tensor_metadata(Tensor a, SymInt[]? size=None, SymInt[]? stride=None, ScalarType? dtype=None) -> ()
- func: _print(str s) -> ()
dispatch:
CompositeExplicitAutograd: _print
- func: sym_constrain_range(Scalar size, *, int? min=None, int? max=None) -> ()
dispatch:
CompositeExplicitAutograd: sym_constrain_range
- func: sym_constrain_range_for_size(Scalar size, *, int? min=None, int? max=None) -> ()
dispatch:
CompositeExplicitAutograd: sym_constrain_range_for_size
- func: _functional_sym_constrain_range(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor
dispatch:
CompositeExplicitAutograd: _functional_sym_constrain_range
- func: _functional_sym_constrain_range_for_size(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor
dispatch:
CompositeExplicitAutograd: _functional_sym_constrain_range_for_size
- func: _make_dep_token(*, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
dispatch:
CPU: _make_dep_token_cpu
- func: refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)
variants: method
- func: _use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool
device_check: NoCheck # Tensor arguments allowed to be on different devices, see also _cudnn_ctc_loss
dispatch:
CUDA: _use_cudnn_ctc_loss
- func: _use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool
device_check: NoCheck # Tensor arguments allowed to be on different devices, see also _cudnn_ctc_loss
dispatch:
CUDA: _use_cudnn_ctc_loss_tensor
- func: _cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
device_check: NoCheck # log_probs is expected to be on CUDA while targets is expected to be on CPU
dispatch:
CUDA: _cudnn_ctc_loss
autogen: _cudnn_ctc_loss.out
- func: _cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
device_check: NoCheck # log_probs is expected to be on CUDA while targets is expected to be on CPU
dispatch:
CUDA: _cudnn_ctc_loss_tensor
- func: _use_cudnn_rnn_flatten_weight() -> bool
- func: _cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
dispatch:
CUDA: _cudnn_rnn_flatten_weight
autogen: _cudnn_rnn_flatten_weight.out
- func: _cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
# rnn_tanh may or may not redispatch to _cudnn_rnn based on algorithm and build. Thus it might hit dispatch or kernel device check.
# Disable dispatch time device check for consistent behavior.
device_check: NoCheck
dispatch:
CUDA: _cudnn_rnn
autogen: _cudnn_rnn.out
tags: nondeterministic_seeded
- func: _cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
dispatch:
CUDA: _cudnn_rnn_backward
autogen: _cudnn_rnn_backward.out
- func: _cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
dispatch:
CUDA: _cudnn_init_dropout_state
autogen: _cudnn_init_dropout_state.out
tags: nondeterministic_seeded
- func: _debug_has_internal_overlap(Tensor self) -> int
variants: function
- func: _fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)
variants: function
dispatch:
CUDA: fused_dropout_cuda
tags: nondeterministic_seeded
autogen: _fused_dropout.out
- func: _masked_scale(Tensor self, Tensor mask, float scale) -> Tensor
variants: function
dispatch:
CUDA: masked_scale_cuda
autogen: _masked_scale.out
- func: native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)
variants: function
dispatch:
CPU: native_dropout_cpu
CUDA: native_dropout_cuda
NestedTensorCPU, NestedTensorCUDA: native_dropout_nested
tags: [nondeterministic_seeded, core]
autogen: native_dropout.out
- func: native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor
dispatch:
CPU, NestedTensorCPU, NestedTensorCUDA: native_dropout_backward
CUDA: native_dropout_backward_cuda
autogen: native_dropout_backward.out
tags: pointwise
- func: _sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)
- func: _sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)
- func: _sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!)
- func: _sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)
- func: _reshape_from_tensor(Tensor self, Tensor shape) -> Tensor
- func: _shape_as_tensor(Tensor self) -> Tensor
- func: dropout(Tensor input, float p, bool train) -> Tensor
tags: nondeterministic_seeded
- func: dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
tags: nondeterministic_seeded
- func: feature_dropout(Tensor input, float p, bool train) -> Tensor
tags: nondeterministic_seeded
- func: feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
tags: nondeterministic_seeded
- func: alpha_dropout(Tensor input, float p, bool train) -> Tensor
tags: nondeterministic_seeded
- func: alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
tags: nondeterministic_seeded
- func: feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor
tags: nondeterministic_seeded
- func: feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
tags: nondeterministic_seeded
- func: abs(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: abs
SparseCPU, SparseCUDA: abs_sparse
SparseCsrCPU, SparseCsrCUDA: abs_sparse_csr
NestedTensorCPU, NestedTensorCUDA: NestedTensor_abs
tags: [core, pointwise]
- func: abs_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: abs_
SparseCPU, SparseCUDA: abs_sparse_
SparseCsrCPU, SparseCsrCUDA: abs_sparse_csr_
NestedTensorCPU, NestedTensorCUDA: NestedTensor_abs_
- func: abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: abs_out
MPS: abs_out_mps
SparseCPU, SparseCUDA: abs_sparse_out
SparseCsrCPU, SparseCsrCUDA: abs_sparse_csr_out
tags: pointwise
# Note [Adding an alias]
# To add an alias do the following:
#
# 1) Copy the original function's native_functions.yaml entry, but replace the
# original function's name with the alias's name and delete any dispatch
# keys for the alias. Specifying a dispatch key would prevent
# autograd from recording the operations the alias performs, which
# would stop it from "inheriting" the original operation's autograd behavior.
# 2) Implement the corresponding functions and have them redispatch to the
# original function.
# 3) Add docstrings to the new function that reference the original function,
# and document the method as usual (if it exists).
# (See torch/_torch_docs.py and docs/source/torch.rst if adding a function,
# torch/_tensor_docs.py and docs/source/tensors.rst if adding a method,
# or module-specific doc bindings (like torch/linalg/__init__.py) if
# adding an alias in a namespace.)
# 4) Update torch/overrides.py consistent with the original function.
# 5) Update the alias_map in torch/csrc/jit/passes/normalize_ops.cpp.
# 6) Add an `aliases` argument to the existing OpInfo/UnaryUfuncInfo entry, or create a new
# OpInfo/UnaryUfuncInfo entry, in the op_db list in torch/testing/_internal/common_methods_invocations.py.
#
# See torch.absolute, an alias for torch.abs, as an example.
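#
# For illustration (a hedged sketch), assuming `import torch`, an alias and its
# original should agree exactly:
#
#   t = torch.tensor([-1.0, 2.0])
#   assert torch.equal(torch.absolute(t), torch.abs(t))
#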
# Absolute, alias for abs
- func: absolute(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
- func: absolute_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
- func: absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
- func: angle(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: angle
SparseCsrCPU, SparseCsrCUDA: angle_sparse_csr
tags: pointwise
- func: angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: angle_out
SparseCsrCPU, SparseCsrCUDA: angle_sparse_csr_out
tags: pointwise
- func: view_as_real(Tensor(a) self) -> Tensor(a)
variants: function
dispatch:
CPU, CUDA, MPS, Meta: view_as_real
- func: view_as_complex(Tensor(a) self) -> Tensor(a)
variants: function
dispatch:
CPU, CUDA, MPS, Meta: view_as_complex
- func: sgn(Tensor self) -> Tensor
variants: function, method
structured_delegate: sgn.out
dispatch:
SparseCPU, SparseCUDA: sgn_sparse
SparseCsrCPU, SparseCsrCUDA: sgn_sparse_csr
NestedTensorCPU, NestedTensorCUDA: NestedTensor_sgn
tags: pointwise
- func: sgn_(Tensor(a!) self) -> Tensor(a!)
variants: method
structured_delegate: sgn.out
dispatch:
SparseCPU, SparseCUDA: sgn_sparse_
SparseCsrCPU, SparseCsrCUDA: sgn_sparse_csr_
NestedTensorCPU, NestedTensorCUDA: NestedTensor_sgn_
tags: pointwise
- func: sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: sgn_out
MPS: sgn_out_mps
SparseCPU, SparseCUDA: sgn_sparse_out
SparseCsrCPU, SparseCsrCUDA: sgn_sparse_csr_out
tags: pointwise
- func: chalf(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
variants: method
- func: real(Tensor(a) self) -> Tensor(a)
device_check: NoCheck # TensorIterator
variants: function
- func: imag(Tensor(a) self) -> Tensor(a)
device_check: NoCheck # TensorIterator
variants: function
- func: _conj(Tensor(a) self) -> Tensor(a)
variants: function, method
dispatch:
CompositeExplicitAutograd: _conj
- func: conj(Tensor(a) self) -> Tensor(a)
variants: function, method
manual_cpp_binding: True
- func: _conj_physical(Tensor self) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: _conj_physical
SparseCsrCPU, SparseCsrCUDA: conj_physical_sparse_csr
autogen: _conj_physical.out
- func: conj_physical(Tensor self) -> Tensor
variants: function, method
tags: pointwise
- func: conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: conj_physical_out
MPS: conj_physical_out_mps
SparseCPU, SparseCUDA: conj_physical_out_sparse
SparseCsrCPU, SparseCsrCUDA: conj_physical_sparse_csr_out
tags: pointwise
- func: conj_physical_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
dispatch:
CompositeExplicitAutograd: conj_physical_
SparseCsrCPU, SparseCsrCUDA: conj_physical_sparse_csr_
tags: pointwise
- func: resolve_conj(Tensor(a) self) -> Tensor(a)
variants: function, method
- func: resolve_neg(Tensor(a) self) -> Tensor(a)
variants: function, method
- func: _neg_view(Tensor(a) self) -> Tensor(a)
variants: function, method
dispatch:
CompositeExplicitAutograd: _neg_view
- func: acos(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: acos.out
tags: [core, pointwise]
- func: acos_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: acos.out
tags: pointwise
- func: acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: acos_out
MPS: acos_out_mps
tags: pointwise
# arccos, alias of acos
- func: arccos(Tensor self) -> Tensor
variants: function, method
- func: arccos_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
- func: arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor
tags: core
- func: adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor
tags: core
# Return: (Tensor output, Tensor indices)
- func: adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)
- func: add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: add.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: add_sparse
SparseCsrCPU, SparseCsrCUDA: add_sparse_csr
MkldnnCPU: mkldnn_add
ZeroTensor: add_zerotensor
NestedTensorCPU, NestedTensorCUDA: NestedTensor_add_Tensor
tags: [core, pointwise]
- func: add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: add.out
dispatch:
SparseCPU, SparseCUDA: add_sparse_
SparseCsrCPU, SparseCsrCUDA: add_sparse_csr_
MkldnnCPU: mkldnn_add_
NestedTensorCPU, NestedTensorCUDA: NestedTensor_add__Tensor
tags: pointwise
- func: add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
ufunc_inner_loop:
Generic: add (AllAndComplex, BFloat16, Half, ComplexHalf)
ScalarOnly: add (Bool)
dispatch:
SparseCPU: add_out_sparse_cpu
SparseCUDA: add_out_sparse_cuda
SparseCsrCPU: add_out_sparse_compressed_cpu
SparseCsrCUDA: add_out_sparse_compressed_cuda
MkldnnCPU: mkldnn_add_out
MPS: add_out_mps
tags: pointwise
- func: _add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
variants: function
dispatch:
CPU: add_relu
- func: _add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
variants: function
dispatch:
CPU: add_relu_
- func: _add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
variants: function
dispatch:
CPU: add_relu_out
- func: _add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
variants: function
dispatch:
CPU: add_relu
- func: _add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
variants: function
dispatch:
CPU: add_relu_
autogen: _add_relu.Scalar_out
# For C++ only, until we have conversion from C++ numbers to Tensor
- func: add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: add
tags: [core, pointwise]
- func: add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: add_
autogen: add.Scalar_out
tags: pointwise
- func: addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor
structured_delegate: addmv.out
variants: function, method
- func: addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
structured_delegate: addmv.out
variants: function, method
- func: addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU: addmv_out_cpu
CUDA: addmv_out_cuda
MPS: addmv_out_mps
SparseCsrCPU: addmv_out_sparse_compressed
SparseCsrCUDA: addmv_out_sparse_compressed_cuda
- func: addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
variants: function, method
dispatch:
CPU, CUDA: addr
MPS: addr_mps
CompositeExplicitAutograd: math_addr
- func: addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
variants: method
dispatch:
CompositeExplicitAutograd: addr_
- func: addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: addr_out
MPS: addr_out_mps
CompositeExplicitAutograd: math_addr_out
- func: affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: affine_grid_generator
autogen: affine_grid_generator.out
- func: affine_grid_generator_backward(Tensor grad, SymInt[] size, bool align_corners) -> Tensor
variants: function
- func: _is_all_true(Tensor self) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: _is_all_true
- func: _is_any_true(Tensor self) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: _is_any_true
# Note: this function is only for testing.
- func: _test_check_tensor(Tensor self) -> Tensor
variants: function
# Note: this function is only for testing.
- func: _test_functorch_fallback(Tensor self, Tensor other) -> Tensor
variants: function
dispatch:
CPU: _test_functorch_fallback
autogen: _test_functorch_fallback.out
- func: all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: all.out
variants: function, method
- func: all.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: all.dims_out
variants: function, method
cpp_no_default_args: ['dim']
dispatch:
CompositeExplicitAutograd: all_dims_default
- func: all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
dispatch:
CPU, CUDA: all_out
MPS: all_out_mps
- func: all.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
dispatch:
CPU, CUDA: all_dims_out
CompositeExplicitAutograd: all_dims_out_default
cpp_no_default_args: ['dim']
- func: all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
- func: all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
- func: allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool
variants: function, method
tags: data_dependent_output
dispatch:
CompositeExplicitAutograd: allclose
- func: any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: any.out
variants: function, method
tags: core
- func: any.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: any.dims_out
variants: function, method
cpp_no_default_args: ['dim']
tags: core
dispatch:
CompositeExplicitAutograd: any_dims_default
- func: any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
dispatch:
CPU, CUDA: any_out
MPS: any_out_mps
- func: any.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
dispatch:
CPU, CUDA: any_dims_out
CompositeExplicitAutograd: any_dims_out_default
cpp_no_default_args: ['dim']
- func: any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
- func: any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
- func: arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: arange
- func: arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: arange
# This operator should be named `arange.start_out` if following the naming convention. However, that
# name is already taken. Disabled because of CI job failures.
# FIXME: enable this
#- func: arange.start_out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)
# dispatch:
# CompositeExplicitAutograd: arange_start_out
- func: arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: arange
cpp_no_default_args: ['step']
tags: core
- func: arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: arange_out
- func: arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, Meta: arange_out
CUDA: arange_cuda_out
MPS: arange_mps_out
cpp_no_default_args: ['step']
# This function is a temporary hack to allow tracing of arange-like constructs with dynamic
# bounds on arange. Normal arange is not traceable because it does not take any tensor inputs;
# if the range you need is based on another tensor, calling this function directly will
# preserve tracing. Get rid of this when arange can directly take tensors for bounds
# (so that it can be traced directly).
- func: _dim_arange(Tensor like, int dim) -> Tensor
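# For illustration of `_dim_arange` (a hedged sketch), assuming `import torch`:
#
#   like = torch.zeros(5, 3)
#   idx = torch._dim_arange(like, 0)   # comparable to torch.arange(like.size(0))
#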
- func: argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
structured_delegate: argmax.out
device_check: NoCheck # TensorIterator
variants: function, method
tags: core
- func: argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU, CUDA: argmax_out
MPS: argmax_out_mps
- func: argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
structured_delegate: argmin.out
device_check: NoCheck # TensorIterator
variants: function, method
tags: core
- func: argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU, CUDA: argmin_out
MPS: argmin_out_mps
- func: acosh(Tensor self) -> Tensor
variants: function, method
structured_delegate: acosh.out
tags: [core, pointwise]
- func: acosh_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
structured_delegate: acosh.out
tags: pointwise
- func: acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: acosh_out
MPS: acosh_out_mps
tags: pointwise
# arccosh, alias for acosh
- func: arccosh(Tensor self) -> Tensor
variants: function, method
- func: arccosh_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
- func: arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: asinh(Tensor self) -> Tensor
variants: function, method
structured_delegate: asinh.out
dispatch:
SparseCPU, SparseCUDA: asinh_sparse
SparseCsrCPU, SparseCsrCUDA: asinh_sparse_csr
tags: [core, pointwise]
- func: asinh_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
structured_delegate: asinh.out
dispatch:
SparseCPU, SparseCUDA: asinh_sparse_
SparseCsrCPU, SparseCsrCUDA: asinh_sparse_csr_
tags: pointwise
- func: asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: asinh_out
MPS: asinh_out_mps
SparseCPU, SparseCUDA: asinh_sparse_out
SparseCsrCPU, SparseCsrCUDA: asinh_sparse_csr_out
tags: pointwise
# arcsinh, alias for asinh
- func: arcsinh(Tensor self) -> Tensor
variants: function, method
- func: arcsinh_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
- func: arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: atanh(Tensor self) -> Tensor
structured_delegate: atanh.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: atanh_sparse
SparseCsrCPU, SparseCsrCUDA: atanh_sparse_csr
tags: [core, pointwise]
- func: atanh_(Tensor(a!) self) -> Tensor(a!)
structured_delegate: atanh.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: atanh_sparse_
SparseCsrCPU, SparseCsrCUDA: atanh_sparse_csr_
tags: pointwise
- func: atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: atanh_out
MPS: atanh_out_mps
SparseCPU, SparseCUDA: atanh_sparse_out
SparseCsrCPU, SparseCsrCUDA: atanh_sparse_csr_out
tags: pointwise
# arctanh, alias for atanh
- func: arctanh(Tensor self) -> Tensor
variants: function, method
- func: arctanh_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
- func: arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)
variants: function, method
dispatch:
ZeroTensor, CPU, CUDA: as_strided_tensorimpl
Meta: as_strided_tensorimpl_meta_symint
MPS: as_strided_tensorimpl_mps
QuantizedCPU, QuantizedCUDA: as_strided_qtensorimpl
device_check: NoCheck
device_guard: False
tags: core
- func: as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
variants: function, method
device_check: NoCheck
device_guard: False
tags: inplace_view
dispatch:
CompositeExplicitAutogradNonFunctional: as_strided__symint
- func: asin(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: asin.out
dispatch:
SparseCPU, SparseCUDA: asin_sparse
SparseCsrCPU, SparseCsrCUDA: asin_sparse_csr
tags: [core, pointwise]
- func: asin_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: asin.out
dispatch:
SparseCPU, SparseCUDA: asin_sparse_
SparseCsrCPU, SparseCsrCUDA: asin_sparse_csr_
tags: pointwise
- func: asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: asin_out
MPS: asin_out_mps
SparseCPU, SparseCUDA: asin_sparse_out
SparseCsrCPU, SparseCsrCUDA: asin_sparse_csr_out
tags: pointwise
# arcsin, alias of asin
- func: arcsin(Tensor self) -> Tensor
variants: function, method
- func: arcsin_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
- func: arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: atan(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: atan.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: atan_sparse
SparseCsrCPU, SparseCsrCUDA: atan_sparse_csr
tags: [core, pointwise]
- func: atan_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: atan.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: atan_sparse_
SparseCsrCPU, SparseCsrCUDA: atan_sparse_csr_
tags: pointwise
- func: atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: atan_out
MPS: atan_out_mps
SparseCPU, SparseCUDA: atan_sparse_out
SparseCsrCPU, SparseCsrCUDA: atan_sparse_csr_out
tags: pointwise
# arctan, alias of atan
- func: arctan(Tensor self) -> Tensor
variants: function, method
- func: arctan_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
- func: arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: atleast_1d(Tensor self) -> Tensor
variants: function
- func: atleast_1d.Sequence(Tensor[] tensors) -> Tensor[]
- func: atleast_2d(Tensor self) -> Tensor
variants: function
- func: atleast_2d.Sequence(Tensor[] tensors) -> Tensor[]
variants: function
- func: atleast_3d(Tensor self) -> Tensor
variants: function
- func: atleast_3d.Sequence(Tensor[] tensors) -> Tensor[]
variants: function
- func: baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
variants: function, method
structured_delegate: baddbmm.out
- func: baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
variants: method
structured_delegate: baddbmm.out
- func: baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
structured: True
variants: function
dispatch:
CPU: baddbmm_out_cpu
CUDA: baddbmm_out_cuda
MPS: baddbmm_out_mps
SparseCsrCUDA: baddbmm_out_sparse_csr_cuda
- func: bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: bartlett_window
autogen: bartlett_window.out
- func: bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: bartlett_window
autogen: bartlett_window.periodic_out
- func: batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor
- func: quantized_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor
dispatch:
QuantizedCPU: quantized_batch_norm
autogen: quantized_batch_norm.out
- func: _batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)
- func: _batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)
# Sample bernoulli with values in `self` as probability.
- func: bernoulli(Tensor self, *, Generator? generator=None) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: bernoulli
tags: nondeterministic_seeded
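# For illustration of `bernoulli` (a hedged sketch), assuming `import torch`:
#
#   p = torch.full((3,), 0.25)
#   s = torch.bernoulli(p)    # each entry is 1. with probability 0.25, else 0.
#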
- func: bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
tags: nondeterministic_seeded
dispatch:
CPU, CUDA: bernoulli_out
MPS: bernoulli_out_mps
- func: bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
tags: nondeterministic_seeded
dispatch:
CPU, CUDA: bernoulli_
MPS: bernoulli_mps_
autogen: bernoulli.Tensor, bernoulli.Tensor_out
- func: bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
tags: nondeterministic_seeded
dispatch:
CPU, CUDA: bernoulli_
MPS: bernoulli_mps_
autogen: bernoulli.float_out
# Note [bernoulli.p schema]
# We should probably just fix the overload ambiguity by appending a _functional to the C++ API name (BC breaking)
# This out-of-place version isn't used explicitly, but needed by jit.
# There is no default value for `p` here because it would introduce ambiguity
# with the `bernoulli(Tensor self, *, Generator? generator=None)` declaration.
- func: bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutogradNonFunctional: bernoulli
- func: bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor
- func: binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
device_check: NoCheck # TensorIterator
python_module: nn
variants: function
dispatch:
CPU: binary_cross_entropy_cpu
CUDA: binary_cross_entropy_cuda
MPS: binary_cross_entropy_mps
- func: binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
python_module: nn
variants: function
dispatch:
CPU: binary_cross_entropy_out_cpu
CUDA: binary_cross_entropy_out_cuda
MPS: binary_cross_entropy_out_mps
- func: binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
python_module: nn
variants: function
dispatch:
CPU: binary_cross_entropy_backward_cpu
CUDA: binary_cross_entropy_backward_cuda
MPS: binary_cross_entropy_backward_mps
- func: binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
variants: function
dispatch:
CPU: binary_cross_entropy_backward_out_cpu
CUDA: binary_cross_entropy_backward_out_cuda
MPS: binary_cross_entropy_backward_out_mps
- func: binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: binary_cross_entropy_with_logits
autogen: binary_cross_entropy_with_logits.out
- func: bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor
variants: function, method
dispatch:
CPU: _bincount_cpu
CUDA: _bincount_cuda
MPS: _bincount_mps
tags: dynamic_output_shape
autogen: bincount.out
- func: bitwise_not(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: bitwise_not.out
variants: function, method
tags: [core, pointwise]
- func: bitwise_not_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: bitwise_not.out
variants: method
tags: pointwise
- func: bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: bitwise_not_out
MPS: bitwise_not_out_mps
tags: pointwise
- func: copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA, MPS: copysign_out
tags: pointwise
- func: copysign.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: copysign.out
tags: pointwise
- func: copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: copysign.out
- func: copysign.Scalar(Tensor self, Scalar other) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: copysign
tags: pointwise
- func: copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
variants: method
dispatch:
CompositeExplicitAutograd: copysign_
- func: copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: copysign_out
tags: pointwise
# Like clone, but the copy takes place lazily, only if either the
# input or the output is written.
- func: _lazy_clone(Tensor self) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: _lazy_clone
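# For illustration of `_lazy_clone` (a hedged sketch; the laziness itself is
# not observable from Python), assuming `import torch`:
#
#   src = torch.randn(4)
#   dst = torch.ops.aten._lazy_clone(src)  # no data copied yet
#   dst.add_(1)                            # a write materializes the copy
#   assert not torch.equal(src, dst)
#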
- func: logical_not(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: logical_not
NestedTensorCPU, NestedTensorCUDA: NestedTensor_logical_not
tags: [core, pointwise]
- func: logical_not_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: logical_not_
NestedTensorCPU, NestedTensorCUDA: NestedTensor_logical_not_
tags: pointwise
- func: logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: logical_not_out
MPS: logical_not_out_mps
tags: pointwise
- func: logical_xor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: logical_xor
tags: [core, pointwise]
- func: logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: logical_xor_
tags: pointwise
- func: logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: logical_xor_out
MPS: logical_xor_out_mps
tags: pointwise
- func: logical_and(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: logical_and
tags: [core, pointwise]
- func: logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: logical_and_
tags: pointwise
- func: logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: logical_and_out
MPS: logical_and_out_mps
tags: pointwise
- func: logical_or(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: logical_or
tags: [core, pointwise]
- func: logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: logical_or_
tags: pointwise
- func: logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: logical_or_out
MPS: logical_or_out_mps
tags: pointwise
- func: blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: blackman_window
autogen: blackman_window.out
- func: blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: blackman_window
autogen: blackman_window.periodic_out
- func: bmm(Tensor self, Tensor mat2) -> Tensor
structured_delegate: bmm.out
variants: function, method
dispatch:
SparseCPU: bmm_sparse_cpu
SparseCUDA: bmm_sparse_cuda
NestedTensorCPU: bmm_nested
NestedTensorCUDA: bmm_nested_cuda
tags: core
- func: bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
structured: True
variants: function
dispatch:
CPU: bmm_out_cpu
CUDA: bmm_out_cuda
MPS: bmm_out_mps
SparseCPU: bmm_out_sparse_cpu
SparseCUDA: bmm_out_sparse_cuda
SparseCsrCUDA: bmm_out_sparse_csr_cuda
- func: broadcast_tensors(Tensor[] tensors) -> Tensor[]
device_check: NoCheck
device_guard: False
- func: broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)
variants: function, method
dispatch:
CompositeImplicitAutograd: broadcast_to_symint
- func: _sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)
variants: function
dispatch:
SparseCPU, SparseCUDA: sparse_broadcast_to
- func: cat(Tensor[] tensors, int dim=0) -> Tensor
structured_delegate: cat.out
dispatch:
SparseCPU, SparseCUDA: cat_sparse
QuantizedCPU: cat_quantized_cpu
NestedTensorCPU, NestedTensorCUDA: cat_nested
tags: core
- func: cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
structured: True
precomputed:
- dim -> int dim, int valid, bool all_contiguous, bool all_same_dtype, bool all_same_sizes_and_stride, MemoryFormat memory_format
dispatch:
CPU: cat_out_cpu
CUDA: cat_out_cuda
MPS: cat_out_mps
QuantizedCPU: cat_out_quantized_cpu
- func: cat.names(Tensor[] tensors, Dimname dim) -> Tensor
- func: cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
# alias for torch.cat
- func: concat(Tensor[] tensors, int dim=0) -> Tensor
- func: concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
- func: concat.names(Tensor[] tensors, Dimname dim) -> Tensor
- func: concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
# alias for torch.cat
- func: concatenate(Tensor[] tensors, int dim=0) -> Tensor
- func: concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
- func: concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor
- func: concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
- func: block_diag(Tensor[] tensors) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: block_diag
autogen: block_diag.out
- func: ceil(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: ceil.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: ceil_sparse
SparseCsrCPU, SparseCsrCUDA: ceil_sparse_csr
tags: [core, pointwise]
- func: ceil_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: ceil.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: ceil_sparse_
SparseCsrCPU, SparseCsrCUDA: ceil_sparse_csr_
tags: pointwise
- func: ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: ceil_out
MPS: ceil_out_mps
SparseCPU, SparseCUDA: ceil_sparse_out
SparseCsrCPU, SparseCsrCUDA: ceil_sparse_csr_out
tags: pointwise
# alias for torch.linalg.multi_dot
- func: chain_matmul(Tensor[] matrices) -> Tensor
variants: function
# alias for torch.linalg.multi_dot
- func: chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)
- func: unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]
variants: function, method
device_check: NoCheck
device_guard: False
- func: chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeImplicitAutograd: chunk
NestedTensorCPU, NestedTensorCUDA: chunk_nested_tensor
- func: tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]
variants: function, method
dispatch:
CompositeImplicitAutograd: tensor_split_sections_symint
- func: tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]
variants: function, method
dispatch:
CompositeImplicitAutograd: tensor_split_indices_symint
- func: tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]
variants: function, method
- func: clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
cpp_no_default_args: ['min']
structured_delegate: clamp.out
dispatch:
QuantizedCPU: clamp_quantized_cpu
tags: [core, pointwise]
- func: clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
variants: function, method
structured_delegate: clamp.Tensor_out
tags: [core, pointwise]
- func: clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
cpp_no_default_args: ['min']
structured_delegate: clamp.out
tags: pointwise
- func: clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
variants: function, method
structured_delegate: clamp.Tensor_out
tags: pointwise
- func: clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
cpp_no_default_args: ['min']
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: clamp_out
MPS: clamp_out_mps
tags: pointwise
- func: clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: clamp_Tensor_out
MPS: clamp_Tensor_out_mps
tags: pointwise
- func: clamp_max(Tensor self, Scalar max) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: clamp_max.out
tags: pointwise
- func: clamp_max.Tensor(Tensor self, Tensor max) -> Tensor
variants: function, method
structured_delegate: clamp_max.Tensor_out
tags: pointwise
- func: clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: clamp_max.out
tags: pointwise
- func: clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)
variants: function, method
structured_delegate: clamp_max.Tensor_out
tags: pointwise
- func: clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: clamp_max_out
MPS: clamp_max_out_mps
tags: pointwise
- func: clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: clamp_max_Tensor_out
MPS: clamp_max_Tensor_out_mps
tags: pointwise
- func: clamp_min(Tensor self, Scalar min) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: clamp_min.out
tags: pointwise
- func: clamp_min.Tensor(Tensor self, Tensor min) -> Tensor
variants: function, method
structured_delegate: clamp_min.Tensor_out
tags: pointwise
- func: clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: clamp_min.out
tags: pointwise
- func: clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)
variants: function, method
structured_delegate: clamp_min.Tensor_out
tags: pointwise
- func: clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: clamp_min_out
MPS: clamp_min_out_mps
tags: pointwise
- func: clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: clamp_min_Tensor_out
MPS: clamp_min_Tensor_out_mps
tags: pointwise
# clip is an alias for clamp
- func: clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
cpp_no_default_args: ['min']
variants: function, method
tags: pointwise
- func: clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
variants: function, method
tags: pointwise
- func: clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
cpp_no_default_args: ['min']
variants: function, method
tags: pointwise
- func: clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
variants: function, method
tags: pointwise
- func: clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
cpp_no_default_args: ['min']
tags: pointwise
- func: clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
- func: cudnn_is_acceptable(Tensor self) -> bool
device_check: NoCheck
device_guard: False
- func: complex(Tensor real, Tensor imag) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: complex
- func: complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: complex_out
MPS: complex_out_mps
- func: polar(Tensor abs, Tensor angle) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: polar
- func: polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: polar_out
MPS: polar_out_mps
- func: constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: constant_pad_nd
MPS: constant_pad_nd_mps
autogen: constant_pad_nd.out
tags: core
- func: contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)
variants: method
manual_cpp_binding: True
- func: convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor
dispatch:
CompositeExplicitAutograd: convolution
autogen: convolution.out
tags: core
- func: convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
dispatch:
CompositeExplicitAutograd, CUDA: convolution_backward
autogen: convolution_backward.out
tags: core
- func: convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor
dispatch:
CompositeExplicitAutograd: convolution_overrideable
autogen: convolution_overrideable.out
- func: convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
dispatch:
CompositeExplicitAutograd: convolution_backward_overrideable
autogen: convolution_backward_overrideable.out
- func: _convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor
dispatch:
CompositeExplicitAutograd: _convolution
autogen: _convolution.out
- func: _convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, int[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor
- func: _convolution_mode(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, str padding, SymInt[] dilation, SymInt groups) -> Tensor
dispatch:
CompositeImplicitAutograd: _convolution_mode_symint
- func: _convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
- func: conv1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] dilation=1, SymInt groups=1) -> Tensor
dispatch:
CompositeImplicitAutograd: conv1d_symint
- func: conv2d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, SymInt groups=1) -> Tensor
dispatch:
CompositeImplicitAutograd: conv2d_symint
- func: conv3d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, SymInt groups=1) -> Tensor
dispatch:
CompositeImplicitAutograd: conv3d_symint
- func: conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, str padding="valid", SymInt[1] dilation=1, SymInt groups=1) -> Tensor
cpp_no_default_args: ['bias', 'stride', 'padding']
dispatch:
CompositeImplicitAutograd: conv1d_padding_symint
- func: conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, str padding="valid", SymInt[2] dilation=1, SymInt groups=1) -> Tensor
cpp_no_default_args: ['bias', 'stride', 'padding']
dispatch:
CompositeImplicitAutograd: conv2d_padding_symint
- func: conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, str padding="valid", SymInt[3] dilation=1, SymInt groups=1) -> Tensor
cpp_no_default_args: ['bias', 'stride', 'padding']
dispatch:
CompositeImplicitAutograd: conv3d_padding_symint
- func: conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor
dispatch:
CompositeExplicitAutograd: conv_tbc
autogen: conv_tbc.out
- func: conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor)
# NB: we inherit the goofy argument order from PyTorch torch.nn.functional
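# For example (a minimal sketch; the tensor shapes are made up): unlike conv1d above,
# `dilation` comes last, after `output_padding` and `groups`, matching
# torch.nn.functional.conv_transpose1d:
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.randn(1, 4, 8)    # (N, C_in, L)
#   w = torch.randn(4, 2, 3)    # (C_in, C_out, kernel_size)
#   # positional order: input, weight, bias, stride, padding, output_padding, groups, dilation
#   y = F.conv_transpose1d(x, w, None, 2, 1, 1, 1, 1)
#   assert y.shape == (1, 2, 16)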
- func: conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] output_padding=0, SymInt groups=1, SymInt[1] dilation=1) -> Tensor
dispatch:
CompositeImplicitAutograd: conv_transpose1d_symint
- func: conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt groups=1, SymInt[2] dilation=1) -> Tensor
dispatch:
CompositeImplicitAutograd: conv_transpose2d_symint
- func: conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt groups=1, SymInt[3] dilation=1) -> Tensor
dispatch:
CompositeImplicitAutograd: conv_transpose3d_symint
- func: copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: copy
tags: core
- func: copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
variants: method
device_check: NoCheck
device_guard: False
dispatch:
MkldnnCPU: copy_mkldnn_
SparseCPU, SparseCUDA: copy_sparse_wrapper_
CompositeExplicitAutograd: copy_
SparseCsrCPU, SparseCsrCUDA: copy_sparse_compressed_
NestedTensorCPU, NestedTensorCUDA: copy_nested_
autogen: copy.out
- func: _copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor
dispatch:
MPS: _copy_from_mps
autogen: _copy_from.out
# We need this to be able to properly copy from a CPU tensor to an XLA tensor when their sizes differ.
# See https://github.com/pytorch/xla/issues/2881
- func: _copy_from_and_resize(Tensor self, Tensor dst) -> Tensor
dispatch:
MPS: _copy_from_and_resize_mps
autogen: _copy_from_and_resize.out
- func: cos(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: cos.out
dispatch:
NestedTensorCPU, NestedTensorCUDA: cos_nested
tags: [core, pointwise]
- func: cos_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: cos.out
tags: pointwise
- func: cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: cos_out
MPS: cos_out_mps
tags: pointwise
- func: cosh(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: cosh.out
tags: [core, pointwise]
- func: cosh_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: cosh.out
tags: pointwise
- func: cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: cosh_out
MPS: cosh_out_mps
tags: pointwise
- func: cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
- func: count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor
variants: function, method
dispatch:
CPU: count_nonzero_cpu
CUDA: count_nonzero_cuda
MPS: count_nonzero_mps
autogen: count_nonzero.dim_IntList_out
- func: count_nonzero(Tensor self, int? dim=None) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: count_nonzero
autogen: count_nonzero.out
- func: cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor
variants: function, method
- func: corrcoef(Tensor self) -> Tensor
variants: function, method
- func: cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid
dispatch:
CUDA: cudnn_affine_grid_generator_forward
autogen: cudnn_affine_grid_generator.out
# TODO: Why do I have to call this grad?!
- func: cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta
dispatch:
CUDA: cudnn_affine_grid_generator_backward
autogen: cudnn_affine_grid_generator_backward.out
- func: cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor)
dispatch:
CUDA: cudnn_batch_norm
autogen: cudnn_batch_norm.out
# NB: You can only use this if you called cudnn_batch_norm with training=True
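# For example (an untested minimal sketch; shapes are illustrative and CUDA with cuDNN
# is assumed), the `reserve` output of the training-mode forward is what the backward
# below consumes:
#
#   import torch
#   x = torch.randn(8, 3, 4, 4, device="cuda")
#   weight, bias = torch.ones(3, device="cuda"), torch.zeros(3, device="cuda")
#   running_mean, running_var = torch.zeros(3, device="cuda"), torch.ones(3, device="cuda")
#   out, save_mean, save_var, reserve = torch.ops.aten.cudnn_batch_norm(
#       x, weight, bias, running_mean, running_var, True, 0.1, 1e-5)  # training=True
#   grads = torch.ops.aten.cudnn_batch_norm_backward(
#       x, torch.ones_like(out), weight, running_mean, running_var,
#       save_mean, save_var, 1e-5, reserve)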
- func: cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor)
dispatch:
CUDA: cudnn_batch_norm_backward
autogen: cudnn_batch_norm_backward.out
- func: cudnn_convolution(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
dispatch:
CUDA: cudnn_convolution
- func: cudnn_convolution.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CUDA: cudnn_convolution_out
- func: cudnn_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
dispatch:
CUDA: cudnn_convolution_transpose
autogen: cudnn_convolution_transpose.out
- func: _mps_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor
dispatch:
MPS: _mps_convolution_transpose
autogen: _mps_convolution_transpose.out
- func: mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask) -> (Tensor, Tensor)
dispatch:
MPS: mps_convolution_transpose_backward
autogen: mps_convolution_transpose_backward.out
- func: cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor
dispatch:
CUDA: cudnn_convolution_relu
autogen: cudnn_convolution_relu.out
- func: cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor
dispatch:
CUDA: cudnn_convolution_add_relu
autogen: cudnn_convolution_add_relu.out
# NB: input is special cased in a way I don't quite understand
- func: cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output
dispatch:
CUDA: cudnn_grid_sampler_forward
autogen: cudnn_grid_sampler.out
- func: cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid)
dispatch:
CUDA: cudnn_grid_sampler_backward
autogen: cudnn_grid_sampler_backward.out
- func: cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: cummax
- func: cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
device_check: NoCheck # TensorIterator
dispatch:
CompositeExplicitAutograd: cummax_out
- func: cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
device_check: NoCheck # TensorIterator
variants: function, method
- func: cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
device_check: NoCheck # TensorIterator
- func: _cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
variants: function
dispatch:
CPU: cummax_helper_cpu
CUDA: cummax_helper_cuda
- func: cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: cummin
- func: cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
device_check: NoCheck # TensorIterator
dispatch:
CompositeExplicitAutograd: cummin_out
- func: cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
device_check: NoCheck # TensorIterator
variants: function, method
- func: cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
device_check: NoCheck # TensorIterator
- func: _cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
variants: function
dispatch:
CPU: cummin_helper_cpu
CUDA: cummin_helper_cuda
- func: cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
- func: cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
structured_delegate: cumprod.out
device_check: NoCheck # TensorIterator
variants: function, method
- func: cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
structured_delegate: cumprod.out
variants: method
- func: cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
structured: True
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: cumprod_out
MPS: cumprod_out_mps
- func: cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
- func: cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
variants: method
- func: cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
- func: cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
- func: cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
structured_delegate: cumsum.out
device_check: NoCheck # TensorIterator
variants: function, method
tags: core
- func: cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
structured_delegate: cumsum.out
variants: method
- func: cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
structured: True
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: cumsum_out
MPS: cumsum_out_mps
- func: cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
- func: cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
variants: method
- func: cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
- func: cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
- func: cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
- func: ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
# convenience overload that converts the length tensors to int lists for you
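# For example (a minimal sketch; the shapes are illustrative), passing the lengths as
# int64 tensors routes to this Tensor overload:
#
#   import torch
#   import torch.nn.functional as F
#   T, N, C, S = 50, 4, 20, 10
#   log_probs = torch.randn(T, N, C).log_softmax(2)
#   targets = torch.randint(1, C, (N, S), dtype=torch.long)
#   input_lengths = torch.full((N,), T, dtype=torch.long)
#   target_lengths = torch.full((N,), S, dtype=torch.long)
#   loss = F.ctc_loss(log_probs, targets, input_lengths, target_lengths)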
- func: ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
- func: _ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
dispatch:
CPU: ctc_loss_cpu
CUDA: ctc_loss_gpu
Meta: ctc_loss_meta
autogen: _ctc_loss.out
tags: dynamic_output_shape # the shape of second output is data dependent
- func: _ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
dispatch:
CPU, CUDA: ctc_loss_tensor
autogen: _ctc_loss.Tensor_out
tags: dynamic_output_shape # the shape of second output is data dependent
- func: _ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
dispatch:
CPU: ctc_loss_backward_cpu
CUDA: ctc_loss_backward_gpu
autogen: _ctc_loss_backward.out
- func: _ctc_loss_backward.Tensor(Tensor grad, Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
dispatch:
CPU, CUDA: ctc_loss_backward_tensor
- func: diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutogradNonFunctional: diag_embed
autogen: diag_embed.out
- func: diagflat(Tensor self, int offset=0) -> Tensor
variants: function, method
- func: diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)
variants: function, method
dispatch:
CompositeExplicitAutograd: diagonal
tags: core
- func: linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a)
python_module: linalg
variants: function
- func: diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)
variants: function, method
- func: diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: diagonal_backward_symint
autogen: diagonal_backward.out
- func: fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)
variants: method
- func: diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor
variants: function, method
- func: diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)
variants: function
- func: gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? dim=None, int edge_order=1) -> Tensor[]
variants: function
- func: gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[]
variants: function
- func: gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[]
variants: function
- func: gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
variants: function
- func: gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[]
variants: function
- func: gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
variants: function
- func: gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[]
variants: function
- func: div.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: div.out
dispatch:
SparseCPU, SparseCUDA: div_sparse
ZeroTensor: div_zerotensor
NestedTensorCPU, NestedTensorCUDA: NestedTensor_div_Tensor
tags: [core, pointwise]
- func: div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: div.out
dispatch:
SparseCPU, SparseCUDA: div_sparse_
tags: pointwise
- func: div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: div_out
MPS: div_out_mps
SparseCPU, SparseCUDA: div_out_sparse_zerodim
tags: pointwise
- func: div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: div.out_mode
dispatch:
SparseCPU, SparseCUDA: div_sparse
tags: [core, pointwise]
- func: div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: div.out_mode
dispatch:
SparseCPU, SparseCUDA: div_sparse_
tags: pointwise
- func: div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: div_out_mode
MPS: div_out_mode_mps
SparseCPU, SparseCUDA: div_out_sparse_zerodim
tags: pointwise
# For C++ only, until we have conversion from C++ numbers to Tensor
- func: div.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: div
NestedTensorCPU, NestedTensorCUDA: NestedTensor_div_Scalar
tags: [core, pointwise]
- func: div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: div_
autogen: div.Scalar_out
tags: pointwise
- func: div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: div
tags: [core, pointwise]
- func: div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
variants: method
dispatch:
CompositeExplicitAutograd: div_
autogen: div.Scalar_mode_out
tags: pointwise
# divide, alias for div
- func: divide.Tensor(Tensor self, Tensor other) -> Tensor
variants: function, method
- func: divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
variants: method
- func: divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- func: divide.Scalar(Tensor self, Scalar other) -> Tensor
variants: function, method
- func: divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
variants: method
- func: divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
variants: function, method
- func: divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
variants: method
- func: divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
- func: divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
variants: function, method
- func: divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
variants: method
# true_divide, an alias for div
- func: true_divide.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
tags: pointwise
- func: true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
- func: true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
- func: true_divide.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
- func: true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
- func: dot(Tensor self, Tensor tensor) -> Tensor
variants: function, method
dispatch:
CPU: dot
CUDA: dot_cuda
MPS: dot_mps
- func: dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: dot_out
- func: vdot(Tensor self, Tensor other) -> Tensor
variants: function, method
dispatch:
CPU: vdot
CUDA: vdot_cuda
- func: vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: vdot_out
- func: einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor
- func: embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor
dispatch:
CompositeExplicitAutograd: embedding_symint
NestedTensorCPU, NestedTensorCUDA: NestedTensor_embedding
autogen: embedding.out
tags: core
- func: embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor
dispatch:
CompositeImplicitAutograd: embedding_backward_symint
- func: embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor
dispatch:
CPU: embedding_dense_backward_cpu
CUDA: embedding_dense_backward_cuda
MPS: embedding_dense_backward_mps
autogen: embedding_dense_backward.out
tags: core
- func: embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)
dispatch:
CPU: embedding_renorm_cpu_
CUDA: embedding_renorm_cuda_
autogen: embedding_renorm, embedding_renorm.out
- func: embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor
# NOTE [ embedding_bag Native Functions ]
# The `_embedding_bag.*` variants assume that the input tensors other than `weight`,
# e.g. `indices` and `offsets` (and `offset2bag`), are contiguous.
# We really only need to enforce this for `_embedding_bag` (the forward) because
# the backward inputs are the same as the forward ones.
# The `embedding_bag` wrapper below is created to achieve this, e.g., by
# applying indices = indices.contiguous().
# The backward functions apply a check that these input tensors are contiguous.
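# For example (a minimal sketch; shapes are illustrative), the wrapper accepts
# non-contiguous indices, while the internal op should be given contiguous ones:
#
#   import torch
#   import torch.nn.functional as F
#   weight = torch.randn(10, 3)
#   indices = torch.arange(8)[::2]            # int64, non-contiguous view
#   offsets = torch.tensor([0, 2])
#   out = F.embedding_bag(indices, weight, offsets, mode='sum')  # calls .contiguous() for us
#   raw, *_ = torch.ops.aten._embedding_bag(weight, indices.contiguous(), offsets)
#   assert torch.allclose(out, raw)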
- func: _embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
dispatch:
CPU: _embedding_bag_forward_only_cpu
CUDA: _embedding_bag_forward_only_cuda
autogen: _embedding_bag_forward_only.out
- func: _rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor)
# row_stack is the alias of vstack
- func: row_stack(Tensor[] tensors) -> Tensor
- func: row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
- func: embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)
# To keep backward and forward compatibility, and to avoid ambiguity with the
# original signature above, scale_grad_by_freq, mode, sparse,
# per_sample_weights, and include_last_offset parameters do not have default
# values. Once the original signature is removed, default values can be added.
- func: embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)
- func: _embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
dispatch:
CPU: _embedding_bag_cpu
CUDA: _embedding_bag_cuda
autogen: _embedding_bag.out
tags: core
- func: _embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
dispatch:
CompositeImplicitAutograd: _embedding_bag_backward_symint
- func: _embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
dispatch:
CompositeImplicitAutograd: _embedding_bag_sparse_backward_symint
- func: _embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
dispatch:
CPU: _embedding_bag_dense_backward_cpu
CUDA: _embedding_bag_dense_backward_cuda
autogen: _embedding_bag_dense_backward.out
- func: _embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor
dispatch:
CPU: _embedding_bag_per_sample_weights_backward_cpu
CUDA: _embedding_bag_per_sample_weights_backward_cuda
autogen: _embedding_bag_per_sample_weights_backward.out
- func: empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: empty_names
autogen: empty.names_out
- func: empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
dispatch:
CPU: empty_cpu
CUDA: empty_cuda
MPS: empty_mps
Meta: empty_meta_symint
MkldnnCPU: empty_mkldnn
SparseCPU, SparseCUDA, SparseMeta: empty_sparse
SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: empty_sparse_compressed
QuantizedCPU, QuantizedCUDA, QuantizedMeta: empty_unknown_quantized
tags: core
- func: empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: empty_permuted_symint
autogen: empty_permuted.out
# We do not make new_empty a composite that calls into new_empty_strided, as the strided
# version is significantly more difficult for different backends to implement
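# For example (a minimal sketch): both factories inherit dtype/device from `self`,
# but only the strided variant lets the caller choose arbitrary strides:
#
#   import torch
#   x = torch.empty(0, dtype=torch.float64)
#   a = x.new_empty((2, 3))                   # contiguous, dtype float64
#   b = x.new_empty_strided((2, 3), (1, 2))   # column-major strides
#   assert a.stride() == (3, 1) and b.stride() == (1, 2)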
- func: new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
variants: method
dispatch:
CompositeExplicitAutograd: new_empty_symint
autogen: new_empty.out
- func: new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
variants: method
dispatch:
CompositeExplicitAutogradNonFunctional: new_empty_strided_symint
autogen: new_empty_strided.out
- func: new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
variants: method
dispatch:
# NB: Although this composite mutates on the inside, it is
# non-differentiable so NonFunctional doesn't apply
CompositeExplicitAutograd: new_full
autogen: new_full.out
- func: new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
variants: method
dispatch:
# NB: Although this composite mutates on the inside, it is
# non-differentiable so NonFunctional doesn't apply
CompositeExplicitAutograd: new_zeros
autogen: new_zeros.out
- func: new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
variants: method
dispatch:
# NB: Although this composite mutates on the inside, it is
# non-differentiable so NonFunctional doesn't apply
CompositeExplicitAutograd: new_ones
autogen: new_ones.out
# the other overrides exist to provide a more helpful error message that dtype is required
- func: _empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
dispatch:
CPU: empty_affine_quantized_other_backends_stub
QuantizedCPU, QuantizedCUDA: empty_affine_quantized
autogen: _empty_affine_quantized.out
# it's a factory function receiving a tensor argument, so it is overridden explicitly
# the other overrides exist to provide a more helpful error message that dtype is required
- func: _empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
category_override: factory
dispatch:
CPU: empty_per_channel_affine_quantized_other_backends_stub
QuantizedCPU, QuantizedCUDA: empty_per_channel_affine_quantized
autogen: _empty_per_channel_affine_quantized.out
- func: resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
variants: method
device_check: NoCheck
device_guard: False
tags: [core, inplace_view]
dispatch:
Meta: resize__symint
CPU: resize_
CUDA: resize_cuda_
MPS: resize_mps_
QuantizedCPU: quantized_resize_cpu_
SparseCsrCPU, SparseCsrCUDA: resize_sparse_csr_
autogen: resize, resize.out
# This is a utility function that enables users to resize the out tensor while registering kernels for out variants.
# Eventually, we can consider exposing `resize_output` as a public API and shipping it with python op registration
# to make it easy to register out variants for ops.
- func: _resize_output_(Tensor(a!) self, SymInt[] size, Device device) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
variants: function
dispatch:
Meta: _resize_output_
autogen: _resize_output, _resize_output.out
- func: empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
category_override: factory
variants: function
dispatch:
QuantizedCPU, QuantizedCUDA: empty_quantized
autogen: empty_quantized.out
- func: empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
device_guard: False
- func: empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: empty_like
QuantizedCPU, QuantizedCUDA: empty_like_quantized
SparseCPU, SparseCUDA, SparseMeta: empty_like_sparse_coo
SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: empty_like_sparse_csr
NestedTensorCPU, NestedTensorCUDA: empty_like_nested
autogen: empty_like.out
- func: empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CPU: empty_strided_cpu
CUDA: empty_strided_cuda
MPS: empty_strided_mps
Meta: empty_strided_meta_symint
QuantizedCPU, QuantizedCUDA: empty_strided_unknown_quantized
autogen: empty_strided.out
tags: core
- func: erf(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: erf.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: erf_sparse
SparseCsrCPU, SparseCsrCUDA: erf_sparse_csr
tags: [core, pointwise]
- func: erf_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: erf.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: erf_sparse_
SparseCsrCPU, SparseCsrCUDA: erf_sparse_csr_
tags: pointwise
- func: erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: erf_out
MPS: erf_out_mps
SparseCPU, SparseCUDA: erf_sparse_out
SparseCsrCPU, SparseCsrCUDA: erf_sparse_csr_out
tags: pointwise
- func: erfc(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: erfc.out
variants: function, method
tags: pointwise
- func: erfc_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: erfc.out
variants: function, method
tags: pointwise
- func: erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: erfc_out
tags: pointwise
- func: exp(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: exp.out
variants: function, method
tags: [core, pointwise]
- func: exp_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: exp.out
variants: function, method
tags: pointwise
- func: exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: exp_out
MPS: exp_out_mps
tags: pointwise
- func: exp2(Tensor self) -> Tensor
structured_delegate: exp2.out
variants: function, method
tags: pointwise
- func: exp2_(Tensor(a!) self) -> Tensor(a!)
structured_delegate: exp2.out
variants: function, method
tags: pointwise
- func: exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: exp2_out
MPS: exp2_out_mps
tags: pointwise
- func: expm1(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: expm1.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: expm1_sparse
SparseCsrCPU, SparseCsrCUDA: expm1_sparse_csr
tags: [core, pointwise]
- func: expm1_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: expm1.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: expm1_sparse_
SparseCsrCPU, SparseCsrCUDA: expm1_sparse_csr_
tags: pointwise
- func: expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: expm1_out
MPS: expm1_out_mps
SparseCPU, SparseCUDA: expm1_sparse_out
SparseCsrCPU, SparseCsrCUDA: expm1_sparse_csr_out
tags: pointwise
- func: expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)
variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: expand
tags: core
- func: expand_as(Tensor(a) self, Tensor other) -> Tensor(a)
variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.
device_check: NoCheck
device_guard: False
# decomposes to eye.m
- func: eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: eye
- func: eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: eye
- func: eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, Meta: eye_out_cpu
CUDA: eye_out_cuda
MPS: eye_out_mps
- func: eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, Meta: eye_out_cpu
CUDA: eye_out_cuda
MPS: eye_out_mps
- func: flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)
variants: function, method
- func: flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a)
variants: function, method
- func: flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a)
variants: function, method
- func: flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a)
variants: function, method
- func: unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a)
variants: function, method
dispatch:
CompositeImplicitAutograd: unflatten_symint
- func: unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] sizes, Dimname[] names) -> Tensor(a)
variants: function, method
dispatch:
CompositeImplicitAutograd: unflatten_dimname_symint
- func: fill.Scalar(Tensor self, Scalar value) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: fill
tags: core
- func: fill.Tensor(Tensor self, Tensor value) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: fill
- func: fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: fill_
MPS: fill_scalar_mps
QuantizedCPU, QuantizedCUDA: fill_quantized_
Meta: fill_meta_
SparseCsrCPU, SparseCsrCUDA: fill_sparse_csr_
NestedTensorCPU, NestedTensorCUDA: fill_nested_
autogen: fill.Scalar_out
- func: fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: fill_
MPS: fill_tensor_mps_
QuantizedCPU, QuantizedCUDA: fill_quantized_
Meta: fill_meta_
NestedTensorCPU, NestedTensorCUDA: fill_nested_
autogen: fill.Tensor_out
- func: floor(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: floor.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: floor_sparse
SparseCsrCPU, SparseCsrCUDA: floor_sparse_csr
tags: [core, pointwise]
- func: floor_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: floor.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: floor_sparse_
SparseCsrCPU, SparseCsrCUDA: floor_sparse_csr_
tags: pointwise
- func: floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: floor_out
MPS: floor_out_mps
SparseCPU, SparseCUDA: floor_sparse_out
SparseCsrCPU, SparseCsrCUDA: floor_sparse_csr_out
tags: pointwise
- func: floor_divide(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: floor_divide
MPS: floor_divide_mps
SparseCPU, SparseCUDA: floor_divide_sparse
- func: floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CPU, CUDA: floor_divide_
MPS: floor_divide_mps_
SparseCPU, SparseCUDA: floor_divide_sparse_
- func: floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: floor_divide_out
MPS: floor_divide_out_mps
SparseCPU, SparseCUDA: floor_divide_out_sparse_zerodim
- func: floor_divide.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: floor_divide
- func: floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: floor_divide_
autogen: floor_divide.Scalar_out
- func: frac(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: frac.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: frac_sparse
SparseCsrCPU, SparseCsrCUDA: frac_sparse_csr
tags: pointwise
- func: frac_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: frac.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: frac_sparse_
SparseCsrCPU, SparseCsrCUDA: frac_sparse_csr_
tags: pointwise
- func: frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: frac_out
MPS: frac_out_mps
SparseCPU, SparseCUDA: frac_sparse_out
SparseCsrCPU, SparseCsrCUDA: frac_sparse_csr_out
tags: pointwise
- func: full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: full
autogen: full.names_out
- func: full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: full
tags: core
- func: full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: full_out
- func: full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
dispatch:
# NB: Although this composite mutates on the inside, it is
# non-differentiable so NonFunctional doesn't apply
CompositeExplicitAutograd: full_like
autogen: full_like.out
- func: from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CPU: from_file
autogen: from_file.out
- func: gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: gcd_out
tags: pointwise
- func: gcd(Tensor self, Tensor other) -> Tensor
structured_delegate: gcd.out
variants: function, method
tags: pointwise
- func: gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: gcd.out
variants: function, method
- func: lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: lcm_out
tags: pointwise
- func: lcm(Tensor self, Tensor other) -> Tensor
structured_delegate: lcm.out
variants: function, method
tags: pointwise
- func: lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: lcm.out
variants: function, method
# NOTE [ grid_sampler Native Functions ]
# `grid_sampler` is _supposed to_ do all the shape checking and then dispatch to
# one of `cudnn_grid_sampler`, `grid_sampler_2d`, or `grid_sampler_3d`, each of
# which has the corresponding backward defined as native functions as well.
# However, we do shape checking everywhere for now since each of the mentioned
# functions can be called directly, which will lead to crashes otherwise.
# See https://github.com/pytorch/pytorch/issues/73187 for more information.
#
# There is also _grid_sampler_2d_cpu_fallback which is an
# implementation detail of grid_sampler_2d and is only exposed here for testing
# purposes.
#
# Additionally, arguments `padding_mode` and `interpolation_mode` are cast to
# enums defined in `native/GridSampler.h`. `cudnn_grid_sampler` doesn't take in
# `interpolation_mode` because it only supports Bilinear interpolation mode.
# Nor does it take in `align_corners` because it only supports the mode
# `align_corners = True`.
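# For example (a minimal sketch; the shapes are illustrative), the public entry point
# torch.nn.functional.grid_sample ends up here with the string modes already converted
# to the integer enums:
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.randn(1, 3, 8, 8)
#   theta = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]])   # identity affine transform
#   grid = F.affine_grid(theta, (1, 3, 8, 8), align_corners=False)
#   y = F.grid_sample(x, grid, mode='bilinear', padding_mode='zeros', align_corners=False)
#   assert torch.allclose(x, y, atol=1e-5)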
- func: grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
- func: grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
dispatch:
CPU, QuantizedCPU: grid_sampler_2d_cpu
CUDA: grid_sampler_2d_cuda
MPS: grid_sampler_2d_mps
autogen: grid_sampler_2d.out
tags: core
# `grid_sampler_2d_backward` takes in `output_mask` to optimize performance for
# the case where `input` doesn't require gradient. Gradient for `grid` is always
# computed (only `output_mask[0]` is checked by the implementations).
- func: grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
dispatch:
CPU: grid_sampler_2d_backward_cpu
CUDA: grid_sampler_2d_backward_cuda
autogen: grid_sampler_2d_backward.out
# See NOTE [ grid_sample CPU fallback ]
- func: _grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
dispatch:
CompositeExplicitAutograd: _grid_sampler_2d_cpu_fallback
autogen: _grid_sampler_2d_cpu_fallback.out
- func: _grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)
- func: grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
dispatch:
CPU: grid_sampler_3d_cpu
CUDA: grid_sampler_3d_cuda
autogen: grid_sampler_3d.out
# `grid_sampler_3d_backward` takes in `output_mask` to optimize performance for
# the case where `input` doesn't require gradient. Gradient for `grid` is always
# computed (only `output_mask[0]` is checked by the implementations).
- func: grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
dispatch:
CPU: grid_sampler_3d_backward_cpu
CUDA: grid_sampler_3d_backward_cuda
autogen: grid_sampler_3d_backward.out
- func: hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: hann_window
autogen: hann_window.out
- func: hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: hann_window
autogen: hann_window.periodic_out
- func: hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: hamming_window
autogen: hamming_window.out
- func: hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: hamming_window
autogen: hamming_window.periodic_out
- func: hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: hamming_window
autogen: hamming_window.periodic_alpha_out
- func: hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: hamming_window
autogen: hamming_window.periodic_alpha_beta_out
- func: kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: kaiser_window
autogen: kaiser_window.out
- func: kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: kaiser_window
autogen: kaiser_window.periodic_out
- func: kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: kaiser_window
autogen: kaiser_window.beta_out
- func: hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor
- func: group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor
- func: native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)
dispatch:
CPU, CUDA: native_group_norm
CompositeExplicitAutograd: math_group_norm
autogen: native_group_norm.out
tags: core
- func: native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
dispatch:
CPU, CUDA: native_group_norm_backward
autogen: native_group_norm_backward.out
tags: core
# Real to complex forward FFT
- func: _fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor
variants: function
dispatch:
CPU: _fft_r2c_mkl
CUDA: _fft_r2c_cufft
MPS: _fft_r2c_mps
- func: _fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)
variants: function
dispatch:
CPU: _fft_r2c_mkl_out
CUDA: _fft_r2c_cufft_out
MPS: _fft_r2c_mps_out
# Complex to real inverse FFT
- func: _fft_c2r(Tensor self, int[] dim, int normalization, SymInt last_dim_size) -> Tensor
variants: function
dispatch:
CPU: _fft_c2r_mkl
CUDA: _fft_c2r_cufft
MPS: _fft_c2r_mps
- func: _fft_c2r.out(Tensor self, int[] dim, int normalization, SymInt last_dim_size, *, Tensor(a!) out) -> Tensor(a!)
variants: function
dispatch:
CPU: _fft_c2r_mkl_out
CUDA: _fft_c2r_cufft_out
MPS: _fft_c2r_mps_out
# Standard complex to complex FFT (forward or backward)
- func: _fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor
variants: function
dispatch:
CPU: _fft_c2c_mkl
CUDA: _fft_c2c_cufft
MPS: _fft_c2c_mps
- func: _fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
variants: function
dispatch:
CPU: _fft_c2c_mkl_out
CUDA: _fft_c2c_cufft_out
MPS: _fft_c2c_mps_out
- func: _validate_compressed_sparse_indices(bool is_crow, Tensor compressed_idx, Tensor plain_idx, int cdim, int dim, int nnz) -> ()
device_check: NoCheck
variants: function
dispatch:
CPU: _validate_compressed_sparse_indices_cpu
CUDA: _validate_compressed_sparse_indices_cuda
- func: _cufft_get_plan_cache_size(DeviceIndex device_index) -> int
- func: _cufft_get_plan_cache_max_size(DeviceIndex device_index) -> int
- func: _cufft_set_plan_cache_max_size(DeviceIndex device_index, int max_size) -> ()
- func: _cufft_clear_plan_cache(DeviceIndex device_index) -> ()
- func: index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: index.Tensor_out
variants: function, method
dispatch:
QuantizedCPU: quantized_index
tags: [core, dynamic_output_shape]
# NB: This function is special-cased in tools/autograd/gen_variable_type.py
# NB: The following functions are declared in aten/src/ATen/templates/TensorBody.h and defined in aten/src/ATen/TensorIndexing.cpp:
# - Tensor Tensor::index(ArrayRef<TensorIndex> indices)
# - Tensor Tensor::index(std::initializer_list<TensorIndex> indices)
- func: index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
structured: True
structured_inherits: TensorIteratorBase
precomputed:
- indices -> DimVector sizes, DimVector strides
dispatch:
CPU, CUDA, MPS: index_out
# Used by inductor to signal indexing without bounds checks
# Note that we don't support boolean indexing, to avoid dynamic output shapes
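# For example (a minimal sketch): integer indexing has a shape determined by the
# index tensor, whereas boolean masks produce data-dependent output shapes:
#
#   import torch
#   x = torch.arange(6).reshape(2, 3)
#   idx = torch.tensor([1, 0, 1])
#   assert x[idx].shape == (3, 3)     # static: follows idx.shape
#   assert x[x > 2].shape == (3,)     # dynamic: depends on the values in x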
- func: _unsafe_index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: _unsafe_index
- func: index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
structured: True
variants: function
precomputed:
- dim -> int dim
dispatch:
CPU, CUDA: index_copy_out
- func: index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)
variants: method
structured_delegate: index_copy.out
- func: index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor
variants: function, method
structured_delegate: index_copy.out
- func: index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)
variants: method
- func: index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor
variants: function, method
- func: index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)
device_check: NoCheck # delegate to _index_put_impl_, which leverages TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: index_put_
autogen: index_put.out
# NB: The following functions are declared in aten/src/ATen/templates/TensorBody.h and defined in aten/src/ATen/TensorIndexing.cpp:
# - Tensor & Tensor::index_put_(ArrayRef<TensorIndex> indices, Tensor const & rhs)
# - Tensor & Tensor::index_put_(ArrayRef<TensorIndex> indices, Scalar v)
# - Tensor & Tensor::index_put_(std::initializer_list<TensorIndex> indices, Tensor const & rhs)
# - Tensor & Tensor::index_put_(std::initializer_list<TensorIndex> indices, Scalar v)
- func: index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
device_check: NoCheck # delegate to _index_put_impl_ after clone, which leverages TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: index_put
tags: core
- func: _unsafe_index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
device_check: NoCheck # delegate to _index_put_impl_ after clone, which leverages TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: _unsafe_index_put
- func: _index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CPU, CUDA, MPS: _index_put_impl_
QuantizedCPU: _index_put_impl_quantized_cpu_
QuantizedCUDA: _index_put_impl_quantized_cuda_
autogen: _index_put_impl, _index_put_impl.out
- func: instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor
variants: function
- func: isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor
variants: function, method
- func: isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
variants: function
structured: True
dispatch:
CPU, CUDA: isin_Tensor_Tensor_out
- func: isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
variants: function
structured_delegate: isin.Tensor_Tensor_out
- func: isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
variants: function
structured: True
dispatch:
CPU, CUDA: isin_Tensor_Scalar_out
- func: isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor
variants: function
structured_delegate: isin.Tensor_Scalar_out
- func: isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
variants: function
structured: True
dispatch:
CPU, CUDA: isin_Scalar_Tensor_out
- func: isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
variants: function
structured_delegate: isin.Scalar_Tensor_out
- func: isnan(Tensor self) -> Tensor
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CPU, CUDA, MPS: isnan
SparseCPU, SparseCUDA: isnan_sparse
SparseCsrCPU, SparseCsrCUDA: isnan_sparse_csr
autogen: isnan.out
tags: [core, pointwise]
- func: is_distributed(Tensor self) -> bool
variants: function, method
device_check: NoCheck
device_guard: False
- func: is_floating_point(Tensor self) -> bool
variants: function, method
device_check: NoCheck
device_guard: False
manual_cpp_binding: True
- func: is_complex(Tensor self) -> bool
variants: function, method
device_check: NoCheck
device_guard: False
manual_cpp_binding: True
- func: is_conj(Tensor self) -> bool
variants: function, method
device_guard: False
manual_cpp_binding: True
- func: _is_zerotensor(Tensor self) -> bool
variants: function, method
device_guard: False
manual_cpp_binding: True
- func: is_neg(Tensor self) -> bool
variants: function, method
device_guard: False
manual_cpp_binding: True
- func: isreal(Tensor self) -> Tensor
variants: function, method
- func: is_nonzero(Tensor self) -> bool
variants: function, method
device_check: NoCheck
device_guard: False
- func: is_same_size(Tensor self, Tensor other) -> bool
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
NestedTensorCPU, NestedTensorCUDA: nested_is_same_size
CompositeExplicitAutograd: is_same_size
- func: is_signed(Tensor self) -> bool
variants: function, method
device_check: NoCheck
device_guard: False
manual_cpp_binding: True
- func: is_inference(Tensor self) -> bool
variants: function, method
device_check: NoCheck
device_guard: False
manual_cpp_binding: True
- func: kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor
- func: kron(Tensor self, Tensor other) -> Tensor
variants: function, method
- func: kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- func: kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method
dispatch:
CompositeExplicitAutograd: kthvalue
- func: kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
dispatch:
CPU: kthvalue_out_cpu
CUDA: kthvalue_out_cuda
- func: kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method
- func: kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- func: layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
dispatch:
CompositeImplicitAutograd: layer_norm_symint
- func: native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
dispatch:
CPU: layer_norm_cpu
CUDA: layer_norm_cuda
MPS: layer_norm_mps
CompositeExplicitAutograd: math_native_layer_norm
NestedTensorCPU, NestedTensorCUDA: nested_layer_norm
autogen: native_layer_norm.out
tags: core
- func: native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
dispatch:
CPU: layer_norm_backward_cpu
CUDA: layer_norm_backward_cuda
MPS: layer_norm_backward_mps
NestedTensorCPU, NestedTensorCUDA: layer_norm_backward_nested
autogen: native_layer_norm_backward.out
tags: core
- func: nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: nan_to_num
SparseCPU, SparseCUDA: nan_to_num_sparse
tags: pointwise
- func: nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)
variants: function, method
dispatch:
CompositeExplicitAutograd: nan_to_num_
SparseCPU, SparseCUDA: nan_to_num_sparse_
tags: pointwise
- func: nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: nan_to_num_out
MPS: nan_to_num_out_mps
SparseCPU, SparseCUDA: nan_to_num_sparse_out
tags: pointwise
- func: linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor
python_module: nn
dispatch:
CompositeImplicitAutograd: linear
NestedTensorCPU, NestedTensorCUDA: nested_linear
MPS: _mps_linear
- func: linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
dispatch:
NestedTensorCPU, NestedTensorCUDA: nested_linear_backward
MPS: mps_linear_backward
autogen: linear_backward.out
- func: linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
dispatch:
CompositeExplicitAutograd: linear_out
- func: mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor
python_module: nn
dispatch:
MkldnnCPU: mkldnn_linear
autogen: mkldnn_linear.out
- func: mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor
dispatch:
MkldnnCPU: mkldnn_linear_backward_input
autogen: mkldnn_linear_backward_input.out
- func: mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)
dispatch:
MkldnnCPU: mkldnn_linear_backward_weights
autogen: mkldnn_linear_backward_weights.out
- func: mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
dispatch:
MkldnnCPU: mkldnn_linear_backward
autogen: mkldnn_linear_backward.out
- func: _cslt_compress(Tensor input) -> Tensor
dispatch:
CUDA: _cslt_compress
- func: _cslt_sparse_mm(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False, int alg_id=0) -> Tensor
dispatch:
CUDA: _cslt_sparse_mm
- func: _cslt_sparse_mm_search(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False) -> int
dispatch:
CUDA: _cslt_sparse_mm_search
- func: _sparse_semi_structured_linear(Tensor input, Tensor weight, Tensor meta, *, Tensor? bias=None, str? activation=None, ScalarType? out_dtype=None) -> Tensor
dispatch:
CUDA: _sparse_semi_structured_linear
- func: _mixed_dtypes_linear(Tensor input, Tensor weight, Tensor scale, *, Tensor? bias=None, str? activation=None) -> Tensor
dispatch:
CUDA: _mixed_dtypes_linear
- func: fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
- func: fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
- func: fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int)
- func: fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor
- func: fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
- func: fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
- func: fbgemm_pack_quantized_matrix(Tensor input) -> Tensor
- func: fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor
- func: ldexp.Tensor(Tensor self, Tensor other) -> Tensor
variants: function, method
- func: ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)
variants: function, method
tags: pointwise
- func: ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
tags: pointwise
- func: linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: linspace
- func: linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
category_override: factory
dispatch:
CompositeExplicitAutograd: linspace
- func: linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
category_override: factory
dispatch:
CompositeExplicitAutograd: linspace
- func: linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
category_override: factory
dispatch:
CompositeExplicitAutograd: linspace
- func: linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, Meta: linspace_out
CUDA: linspace_cuda_out
MPS: linspace_out_mps
- func: linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)
category_override: factory
dispatch:
CompositeExplicitAutograd: linspace_out
- func: linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
category_override: factory
dispatch:
CompositeExplicitAutograd: linspace_out
- func: linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)
category_override: factory
dispatch:
CompositeExplicitAutograd: linspace_out
- func: log(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: log.out
variants: function, method
tags: [core, pointwise]
- func: log_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: log.out
variants: function, method
tags: pointwise
- func: log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: log_out
MPS: log_out_mps
tags: pointwise
- func: log10(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: log10.out
variants: function, method
tags: [core, pointwise]
- func: log10_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: log10.out
variants: function, method
tags: pointwise
- func: log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: log10_out
MPS: log10_out_mps
tags: pointwise
- func: log1p(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: log1p.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: log1p_sparse
SparseCsrCPU, SparseCsrCUDA: log1p_sparse_csr
tags: [core, pointwise]
- func: log1p_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: log1p.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: log1p_sparse_
SparseCsrCPU, SparseCsrCUDA: log1p_sparse_csr_
tags: pointwise
- func: log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: log1p_out
MPS: log1p_out_mps
SparseCPU, SparseCUDA: log1p_sparse_out
SparseCsrCPU, SparseCsrCUDA: log1p_sparse_csr_out
tags: pointwise
- func: log2(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: log2.out
variants: function, method
tags: [core, pointwise]
- func: log2_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: log2.out
variants: function, method
tags: pointwise
- func: log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: log2_out
MPS: log2_out_mps
tags: pointwise
- func: logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: logaddexp_out
MPS: logaddexp_out_mps
tags: pointwise
- func: logaddexp(Tensor self, Tensor other) -> Tensor
variants: method, function
structured_delegate: logaddexp.out
tags: pointwise
- func: logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: logaddexp2_out
MPS: logaddexp2_out_mps
tags: pointwise
- func: logaddexp2(Tensor self, Tensor other) -> Tensor
variants: method, function
structured_delegate: logaddexp2.out
tags: pointwise
- func: xlogy.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: xlogy.OutTensor
variants: function, method
tags: pointwise
- func: xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: xlogy
tags: pointwise
- func: xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: xlogy
tags: pointwise
# xlogy: inplace variant
- func: xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: xlogy.OutTensor
tags: pointwise
- func: xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: xlogy_
# xlogy: out variant
- func: xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
variants: function
dispatch:
CPU, CUDA: xlogy_out
MPS: xlogy_out_mps
tags: pointwise
- func: xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: xlogy_out
tags: pointwise
- func: xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: xlogy_out
tags: pointwise
- func: logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: logspace
- func: logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
category_override: factory
dispatch:
CompositeExplicitAutograd: logspace
- func: logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
category_override: factory
dispatch:
CompositeExplicitAutograd: logspace
- func: logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
category_override: factory
dispatch:
CompositeExplicitAutograd: logspace
- func: logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, Meta: logspace_out
CUDA: logspace_cuda_out
- func: logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
category_override: factory
dispatch:
CompositeExplicitAutograd: logspace_out
- func: logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
category_override: factory
dispatch:
CompositeExplicitAutograd: logspace_out
- func: logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
category_override: factory
dispatch:
CompositeExplicitAutograd: logspace_out
# log_softmax allows positional dtype, unlike most operators, because kwonly is BC-breaking when loading jit models.
- func: log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
variants: function, method
- func: log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
variants: function
dispatch:
CompositeExplicitAutograd: log_softmax_out
- func: log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
variants: function, method
- func: _log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
structured_delegate: _log_softmax.out
tags: core
- func: _log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU: log_softmax_cpu_out
CUDA: log_softmax_cuda_out
MPS: log_softmax_mps_out
- func: _log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
structured_delegate: _log_softmax_backward_data.out
- func: _log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU: log_softmax_backward_cpu_out
CUDA: log_softmax_backward_cuda_out
MPS: log_softmax_backward_mps_out
- func: _logcumsumexp(Tensor self, int dim) -> Tensor
dispatch:
CPU: _logcumsumexp_cpu
CUDA: _logcumsumexp_cuda
- func: _logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU: _logcumsumexp_out_cpu
CUDA: _logcumsumexp_out_cuda
- func: logcumsumexp(Tensor self, int dim) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: logcumsumexp
- func: logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: logcumsumexp_out
- func: logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor
variants: function, method
- func: logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
- func: logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: logsumexp
- func: logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
# calls squeeze
CompositeExplicitAutogradNonFunctional: logsumexp_out
- func: logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
- func: logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
- func: margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
- func: matmul(Tensor self, Tensor other) -> Tensor
variants: function, method
dispatch:
CompositeImplicitAutograd: matmul
NestedTensorCPU, NestedTensorCUDA: matmul_nested
- func: matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor)
dispatch:
NestedTensorCPU, NestedTensorCUDA: matmul_backward_nested
autogen: matmul_backward.out
- func: matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeImplicitAutograd: matmul_out
NestedTensorCPU, NestedTensorCUDA: matmul_out_nested
# Alias to linalg.matrix_power
- func: matrix_power(Tensor self, int n) -> Tensor
variants: function, method
# Alias to linalg.matrix_power
- func: matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
# Alias to linalg.matrix_exp
- func: matrix_exp(Tensor self) -> Tensor
variants: function, method
# This function should be deprecated in favor of differential_analytic_matrix_function in FunctionsManual.cpp
- func: matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
# DEPRECATED: Use torch.aminmax instead
- func: _aminmax(Tensor self) -> (Tensor, Tensor)
dispatch:
CPU, CUDA: _aminmax_all
autogen: _aminmax.out
# DEPRECATED: Use torch.aminmax instead
- func: _aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
dispatch:
CPU, CUDA: _aminmax
autogen: _aminmax.dim_out
- func: aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
device_check: NoCheck # TensorIterator
structured_delegate: aminmax.out
variants: function, method
- func: aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)
device_check: NoCheck # TensorIterator
structured: True
dispatch:
CPU, CUDA: aminmax_out
MPS: aminmax_out_mps
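# Hedged migration sketch for the deprecated _aminmax ops above (C++17 structured
# bindings; assumes the aminmax method generated from the schema above):
#   auto [mn, mx] = t.aminmax();              // whole-tensor min/max
#   auto [mn_d, mx_d] = t.aminmax(/*dim=*/0); // per-dimension, keepdim defaults to false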
- func: _compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor
dispatch:
CPU, CUDA: _compute_linear_combination
- func: _compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: _compute_linear_combination_out
- func: max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
device_check: NoCheck # TensorIterator
structured_delegate: max.dim_max
variants: function, method
dispatch:
QuantizedCPU, QuantizedCUDA: qmax
tags: core
- func: max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
device_check: NoCheck # TensorIterator
structured: True
precomputed:
- dim -> int dim
dispatch:
CPU, CUDA: max_out
MPS: max_out_mps
- func: max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
device_check: NoCheck # TensorIterator
variants: function, method
- func: max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
device_check: NoCheck # TensorIterator
- func: value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
dispatch:
CompositeImplicitAutograd: value_selecting_reduction_backward_symint
- func: amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
variants: function, method
structured_delegate: amax.out
tags: core
- func: amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU, CUDA: amax_out
MPS: amax_out_mps
# Return: (Tensor output, Tensor indices)
- func: max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
- func: max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
- func: max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
CompositeImplicitAutograd: max_pool2d
MPS: mps_max_pool2d
- func: max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
MPS: mps_max_pool2d_backward
autogen: max_pool2d_backward.out
- func: mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
MkldnnCPU: mkldnn_max_pool2d
autogen: mkldnn_max_pool2d.out
- func: mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
MkldnnCPU: mkldnn_max_pool2d_backward
autogen: mkldnn_max_pool2d_backward.out
- func: mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
MkldnnCPU: mkldnn_max_pool3d
autogen: mkldnn_max_pool3d.out
- func: mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
MkldnnCPU: mkldnn_max_pool3d_backward
autogen: mkldnn_max_pool3d_backward.out
- func: quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
QuantizedCPU: quantized_max_pool1d
autogen: quantized_max_pool1d.out
- func: quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
QuantizedCPU: quantized_max_pool2d
QuantizedCUDA: quantized_max_pool2d_cudnn
autogen: quantized_max_pool2d.out
- func: quantized_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
dispatch:
QuantizedCPU: quantized_max_pool3d
autogen: quantized_max_pool3d.out
- func: max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
# The CPU and GPU dispatch variants are named weirdly here because otherwise there
# are namespacing issues in C++
- func: mean(Tensor self, *, ScalarType? dtype=None) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: mean
tags: core
# Under the normal naming convention this would be `mean.out`; however, since `mean.out` already exists, it has to be renamed.
# FIXME: fix CI jobs and re-enable this
#- func: mean.dtype_out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
# device_check: NoCheck # TensorIterator
# dispatch:
# CompositeExplicitAutograd: mean_dtype_out
- func: mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
structured_delegate: mean.out
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
QuantizedCPU: mean_quantized_cpu
tags: core
- func: mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
structured: True
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: mean_out
MPS: mean_out_mps
QuantizedCPU: mean_out_quantized_cpu
- func: mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
- func: mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
- func: nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
device_check: NoCheck # Composite
variants: function, method
- func: nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # Composite
- func: median(Tensor self) -> Tensor
variants: function, method
dispatch:
CPU: median_cpu
CUDA: median_cuda
MPS: median_mps
autogen: median.out
- func: median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method
dispatch:
CompositeExplicitAutograd: median
- func: median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
dispatch:
CPU: median_out_cpu
CUDA: median_out_cuda
MPS: median_out_mps
- func: median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method
- func: median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- func: nanmedian(Tensor self) -> Tensor
variants: function, method
dispatch:
CPU: nanmedian_cpu
CUDA: nanmedian_cuda
autogen: nanmedian.out
- func: nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method
dispatch:
CompositeExplicitAutograd: nanmedian
- func: nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
dispatch:
CPU: nanmedian_out_cpu
CUDA: nanmedian_out_cuda
- func: nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method
- func: nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- func: min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
device_check: NoCheck # TensorIterator
structured_delegate: min.dim_min
variants: function, method
dispatch:
QuantizedCPU, QuantizedCUDA: qmin
tags: core
- func: min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
device_check: NoCheck # TensorIterator
structured: True
precomputed:
- dim -> int dim
dispatch:
CPU, CUDA: min_out
MPS: min_out_mps
- func: min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
device_check: NoCheck # TensorIterator
variants: function, method
- func: min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
device_check: NoCheck # TensorIterator
- func: amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
variants: function, method
structured_delegate: amin.out
tags: core
- func: amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU, CUDA: amin_out
MPS: amin_out_mps
# TODO: Add this function to MPS dispatch key so that we avoid declaring it in
# native_functions.yaml
# https://github.com/pytorch/pytorch/issues/77394
- func: _mps_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor
dispatch:
MPS: _mps_convolution
autogen: _mps_convolution.out
- func: mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
dispatch:
MPS: mps_convolution_backward
autogen: mps_convolution_backward.out
- func: mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor
dispatch:
CompositeExplicitAutograd: mkldnn_convolution
autogen: mkldnn_convolution.out
- func: mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor)
dispatch:
CPU: mkldnn_rnn_layer
MkldnnCPU: mkldnn_rnn_layer
autogen: mkldnn_rnn_layer.out
- func: mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)
dispatch:
CPU: mkldnn_rnn_layer_backward
autogen: mkldnn_rnn_layer_backward.out
- func: miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)
dispatch:
CUDA: miopen_batch_norm
autogen: miopen_batch_norm.out
- func: miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor)
dispatch:
CUDA: miopen_batch_norm_backward
autogen: miopen_batch_norm_backward.out
- func: miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor
dispatch:
CUDA: miopen_convolution
autogen: miopen_convolution.out
- func: miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor
dispatch:
CUDA: miopen_convolution_transpose
autogen: miopen_convolution_transpose.out
- func: miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor
dispatch:
CUDA: miopen_depthwise_convolution
autogen: miopen_depthwise_convolution.out
- func: miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor
dispatch:
CUDA: miopen_convolution_relu
- func: miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor
dispatch:
CUDA: miopen_convolution_add_relu
- func: miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
dispatch:
CUDA: miopen_rnn
autogen: miopen_rnn.out
tags: nondeterministic_seeded
- func: miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
dispatch:
CUDA: miopen_rnn_backward
autogen: miopen_rnn_backward.out
- func: mm(Tensor self, Tensor mat2) -> Tensor
structured_delegate: mm.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: _sparse_mm
SparseCsrCPU, SparseCsrCUDA: _sparse_csr_mm
tags: core
- func: mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU: mm_out_cpu
CUDA: mm_out_cuda
MPS: mm_out_mps
SparseCPU, SparseCUDA: _sparse_mm_out
SparseCsrCPU, SparseCsrCUDA: _sparse_csr_mm_out
- func: _int_mm(Tensor self, Tensor mat2) -> Tensor
dispatch:
CUDA: _int_mm_cuda
- func: _int_mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CUDA: _int_mm_out_cuda
- func: _convert_weight_to_int4pack(Tensor self, int innerKTiles) -> Tensor
dispatch:
CPU: _convert_weight_to_int4pack_cpu
CUDA: _convert_weight_to_int4pack_cuda
- func: _weight_int4pack_mm(Tensor self, Tensor mat2, int qGroupSize, Tensor qScaleAndZeros) -> Tensor
dispatch:
CPU: _weight_int4pack_mm_cpu
CUDA: _weight_int4pack_mm_cuda
- func: _weight_int8pack_mm(Tensor self, Tensor mat2, Tensor scales) -> Tensor
dispatch:
CPU: _weight_int8pack_mm_cpu
- func: _sparse_mm(Tensor sparse, Tensor dense) -> Tensor
python_module: sparse
- func: _sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor
python_module: sparse
- func: _sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor
dispatch:
SparseCPU: sparse_sparse_matmul_cpu
SparseCUDA: sparse_sparse_matmul_cuda
autogen: _sparse_sparse_matmul.out
- func: mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method
dispatch:
CPU, CUDA: mode
- func: mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
dispatch:
CompositeExplicitAutograd: mode_out
- func: mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
variants: function, method
- func: mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- func: mul.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: mul.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: mul_sparse
SparseCsrCPU, SparseCsrCUDA: mul_sparse_csr
MkldnnCPU: mkldnn_mul
ZeroTensor: mul_zerotensor
NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul_Tensor
tags: [core, pointwise]
- func: mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: mul.out
variants: method
dispatch:
SparseCPU, SparseCUDA: mul_sparse_
SparseCsrCPU, SparseCsrCUDA: mul_sparse_csr_
MkldnnCPU: mkldnn_mul_
NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul__Tensor
tags: pointwise
- func: mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: mul_out
MPS: mul_out_mps
SparseCPU: mul_out_sparse_cpu
SparseCUDA: mul_out_sparse_cuda
SparseCsrCPU, SparseCsrCUDA: mul_out_sparse_csr
MkldnnCPU: mkldnn_mul_out
tags: pointwise
# For C++ only, until we have conversion from C++ numbers to Tensor
- func: mul.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: mul
SparseCsrCPU, SparseCsrCUDA: mul_scalar_sparse_csr
NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul_Scalar
tags: [core, pointwise]
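# Illustrative sketch of the "C++ only" note above (a plain C++ number binds to the
# Scalar overload rather than being converted to a Tensor; assumes at::ones):
#   auto t = at::ones({3});
#   auto y = t.mul(2.5);   // resolves to mul.Scalar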
- func: mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: mul_
SparseCsrCPU, SparseCsrCUDA: mul__scalar_sparse_csr
NestedTensorCPU, NestedTensorCUDA: NestedTensor_mul__Scalar
autogen: mul.Scalar_out
tags: pointwise
# multiply, alias for mul
- func: multiply.Tensor(Tensor self, Tensor other) -> Tensor
variants: function, method
- func: multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
variants: method
- func: multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- func: multiply.Scalar(Tensor self, Scalar other) -> Tensor
variants: function, method
- func: multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
variants: method
- func: mv(Tensor self, Tensor vec) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: mv
SparseCPU, SparseCUDA: mv_sparse
- func: mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: mv_out
- func: mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: mvlgamma_out
tags: pointwise
- func: mvlgamma(Tensor self, int p) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: mvlgamma
tags: pointwise
- func: mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: mvlgamma_
tags: pointwise
- func: narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
variants: function, method
dispatch:
CPU: narrow_copy_dense_cpu
SparseCPU, SparseCUDA: narrow_copy_sparse
CompositeExplicitAutogradNonFunctional: narrow_copy_dense_symint
tags: view_copy
- func: narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU: narrow_copy_dense_cpu_out
- func: narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeImplicitAutograd: narrow_symint
NestedTensorCPU, NestedTensorCUDA: narrow_nested_symint
- func: narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeImplicitAutograd: narrow_tensor_symint
- func: native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
dispatch:
CPU: batch_norm_cpu
CUDA: batch_norm_cuda
MPS: batch_norm_mps
MkldnnCPU: mkldnn_batch_norm
- func: native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
dispatch:
CUDA: batch_norm_cuda_out
MPS: batch_norm_mps_out
CPU: batch_norm_cpu_out
# TODO: In 2 weeks, we should make native_batch_norm composite implicit so that the correct schema percolates through our dispatching
- func: _native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
dispatch:
CPU: _batch_norm_legit_cpu
CUDA: _batch_norm_legit_cuda
MPS: _batch_norm_legit_mps
MkldnnCPU: _mkldnn_batch_norm_legit
autogen: _native_batch_norm_legit_functional
tags: core
# HACK: identical to _native_batch_norm_legit, but training is known to be False,
# so we know that running stats will not be mutated.
# The real fix here is batch norm consolidation.
- func: _native_batch_norm_legit_no_training(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor)
dispatch:
CompositeExplicitAutograd: _batch_norm_legit_no_training
autogen: _native_batch_norm_legit_no_training.out
tags: core
- func: _native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!))
dispatch:
CPU: _batch_norm_legit_cpu_out
CUDA: _batch_norm_legit_cuda_out
MPS: _batch_norm_legit_mps_out
- func: _native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
dispatch:
CPU: _batch_norm_legit_no_stats_cpu
CUDA: _batch_norm_legit_no_stats_cuda
MPS: _batch_norm_legit_no_stats_mps
MkldnnCPU: _mkldnn_batch_norm_legit_no_stats
tags: core
- func: _native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
dispatch:
CPU: _batch_norm_legit_no_stats_cpu_out
CUDA: _batch_norm_legit_no_stats_cuda_out
MPS: _batch_norm_legit_no_stats_mps_out
- func: batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)
dispatch:
CUDA: batch_norm_stats_cuda
autogen: batch_norm_stats.out
- func: batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor
dispatch:
CUDA: batch_norm_elemt_cuda
- func: batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CUDA: batch_norm_elemt_cuda_out
# for backward compatibility
- func: batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor)
dispatch:
CUDA: batch_norm_gather_stats_cuda
autogen: batch_norm_gather_stats.out
- func: batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)
dispatch:
CUDA: batch_norm_gather_stats_with_counts_cuda
autogen: batch_norm_gather_stats_with_counts.out
- func: native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
dispatch:
CPU: batch_norm_backward_cpu
CUDA: batch_norm_backward_cuda
MPS: batch_norm_backward_mps
MkldnnCPU: mkldnn_batch_norm_backward
autogen: native_batch_norm_backward.out
- func: batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor)
dispatch:
CUDA: batch_norm_backward_reduce_cuda
autogen: batch_norm_backward_reduce.out
- func: batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count) -> Tensor
dispatch:
CUDA: batch_norm_backward_elemt_cuda
autogen: batch_norm_backward_elemt.out
- func: batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor)
dispatch:
CPU: batch_norm_update_stats_cpu
CUDA: batch_norm_update_stats_cuda
autogen: batch_norm_update_stats.out
- func: is_vulkan_available() -> bool
- func: _nnpack_available() -> bool
- func: _nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: _nnpack_spatial_convolution
autogen: _nnpack_spatial_convolution.out
- func: ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: ones
autogen: ones.names_out
- func: ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: ones
- func: ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: ones_out
- func: ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
dispatch:
# NB: Although this composite mutates on the inside, it is
# non-differentiable so NonFunctional doesn't apply
CompositeExplicitAutograd: ones_like
NestedTensorCPU, NestedTensorCUDA: ones_like
autogen: ones_like.out
- func: pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor
- func: cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor
- func: _euclidean_dist(Tensor x1, Tensor x2) -> Tensor
dispatch:
CompositeExplicitAutograd: _euclidean_dist
autogen: _euclidean_dist.out
- func: _cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor
dispatch:
CPU, CUDA: _cdist_forward
MPS: _cdist_forward_mps
autogen: _cdist_forward.out
tags: core
- func: _cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor
dispatch:
CPU, CUDA: _cdist_backward
autogen: _cdist_backward.out
- func: pdist(Tensor self, float p=2) -> Tensor
- func: _pdist_forward(Tensor self, float p=2) -> Tensor
dispatch:
CPU, CUDA: _pdist_forward
autogen: _pdist_forward.out
tags: core
- func: _pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor
dispatch:
CPU, CUDA: _pdist_backward
autogen: _pdist_backward.out
- func: cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor
variants: function
- func: permute(Tensor(a) self, int[] dims) -> Tensor(a)
variants: function, method
dispatch:
CompositeExplicitAutograd: permute
MPS: permute_mps
SparseCPU, SparseCUDA: permute_sparse_coo
tags: core
- func: movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
variants: function, method
- func: movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)
variants: function, method
# moveaxis, alias for movedim
- func: moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
variants: function, method
- func: moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)
variants: function, method
# Only exposed from C++ -- in Python,
# we expose it as an attribute `T`, not a function.
#
# I'd like to name this "T" in C++ too, but
# calling a native function "T" causes undefined
# behavior on Windows, for reasons I don't understand
# (maybe related to capital letter collation somehow...)
- func: numpy_T(Tensor(a) self) -> Tensor(a)
variants: method
# Exposed on Python as an attribute 'H'
- func: matrix_H(Tensor(a) self) -> Tensor(a)
variants: method
# Exposed on Python as an attribute 'mT'
- func: mT(Tensor(a) self) -> Tensor(a)
variants: method
# Exposed on Python as an attribute 'mH'
- func: mH(Tensor(a) self) -> Tensor(a)
variants: method
- func: adjoint(Tensor(a) self) -> Tensor(a)
variants: function, method
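# Hedged usage sketch of the accessors above, using the C++ method forms generated
# from these schemas (the Python attributes T/H/mT/mH map onto them):
#   auto a = torch::randn({2, 3, 4}, torch::kComplexFloat);
#   auto t1 = a.mT();        // transpose of the last two dims
#   auto t2 = a.mH();        // conjugate transpose of the last two dims
#   auto t3 = a.adjoint();   // same result as a.mH()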
- func: pixel_shuffle(Tensor self, int upscale_factor) -> Tensor
dispatch:
CPU: pixel_shuffle_cpu
MPS: pixel_shuffle_mps
CompositeExplicitAutogradNonFunctional: math_pixel_shuffle
autogen: pixel_shuffle.out
- func: pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor
dispatch:
CPU: pixel_unshuffle_cpu
MPS: pixel_unshuffle_mps
CompositeExplicitAutogradNonFunctional: math_pixel_unshuffle
autogen: pixel_unshuffle.out
- func: channel_shuffle(Tensor self, SymInt groups) -> Tensor
dispatch:
CPU, CUDA: channel_shuffle
QuantizedCPU: channel_shuffle_quantized_cpu
autogen: channel_shuffle.out
- func: native_channel_shuffle(Tensor self, SymInt groups) -> Tensor
dispatch:
CPU: channel_shuffle_cpu
CompositeImplicitAutograd: math_channel_shuffle
- func: is_pinned(Tensor self, Device? device=None) -> bool
variants: method
dispatch:
NestedTensorCUDA, CUDA: is_pinned_cuda
MPS: is_pinned_mps
CompositeExplicitAutograd: is_pinned_default
# TODO: add a copy kwarg that guarantees that the tensor is put into fresh
# pinned memory
- func: pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)
variants: method
# Unlike pin_memory, this is guaranteed to give a new non-aliasing tensor
- func: _pin_memory(Tensor self, Device? device=None) -> Tensor
dispatch:
CUDA: _pin_memory_cuda
MPS: _pin_memory_mps
NestedTensorCUDA, NestedTensorCPU: _pin_memory_nested
autogen: _pin_memory.out
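# Hedged sketch of the aliasing difference noted above (assumes the generated bindings):
#   auto p = t.pin_memory();      // may return t itself if t is already pinned
#   auto q = at::_pin_memory(t);  // always a fresh, non-aliasing pinned tensor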
- func: pinverse(Tensor self, float rcond=1e-15) -> Tensor
variants: function, method
- func: poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor
variants: function
- func: rad2deg(Tensor self) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: rad2deg
SparseCPU, SparseCUDA: rad2deg_sparse
SparseCsrCPU, SparseCsrCUDA: rad2deg_sparse_csr
- func: rad2deg_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
dispatch:
CompositeExplicitAutograd: rad2deg_
SparseCPU, SparseCUDA: rad2deg_sparse_
SparseCsrCPU, SparseCsrCUDA: rad2deg_sparse_csr_
- func: rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: rad2deg_out
SparseCPU, SparseCUDA: rad2deg_sparse_out
SparseCsrCPU, SparseCsrCUDA: rad2deg_sparse_csr_out
- func: deg2rad(Tensor self) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: deg2rad
SparseCPU, SparseCUDA: deg2rad_sparse
SparseCsrCPU, SparseCsrCUDA: deg2rad_sparse_csr
tags: pointwise
- func: deg2rad_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
dispatch:
CompositeExplicitAutograd: deg2rad_
SparseCPU, SparseCUDA: deg2rad_sparse_
SparseCsrCPU, SparseCsrCUDA: deg2rad_sparse_csr_
tags: pointwise
- func: deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: deg2rad_out
SparseCPU, SparseCUDA: deg2rad_sparse_out
SparseCsrCPU, SparseCsrCUDA: deg2rad_sparse_csr_out
tags: pointwise
- func: scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: scalar_tensor
autogen: scalar_tensor.out
tags: core
- func: rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: rand
autogen: rand.names_out
tags: nondeterministic_seeded
- func: rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
device_check: NoCheck
device_guard: False
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: rand
autogen: rand.generator_with_names_out
- func: rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: [core, nondeterministic_seeded]
dispatch:
CompositeExplicitAutograd: rand
- func: rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: rand
- func: rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: rand_out
- func: rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
- func: rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
# NB: Although this composite mutates on the inside, it is
# non-differentiable so NonFunctional doesn't apply
CompositeExplicitAutograd: rand_like
autogen: rand_like.out
- func: randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint
- func: randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint
- func: randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint
- func: randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint
- func: randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint_out
- func: randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint_out
- func: randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint_out
- func: randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randint_out
- func: randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
# NB: Although this composite mutates on the inside, it is
# non-differentiable so NonFunctional doesn't apply
CompositeExplicitAutograd: randint_like
autogen: randint_like.out
- func: randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
# NB: Although this composite mutates on the inside, it is
# non-differentiable so NonFunctional doesn't apply
CompositeExplicitAutograd: randint_like
autogen: randint_like.low_dtype_out
- func: randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: [core, nondeterministic_seeded]
dispatch:
CompositeExplicitAutograd: randn
- func: randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randn
- func: randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: randn
autogen: randn.names_out
- func: randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: randn
autogen: randn.generator_with_names_out
- func: randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
- func: randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
- func: randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
# NB: Although this composite mutates on the inside, it is
# non-differentiable so NonFunctional doesn't apply
CompositeExplicitAutograd, CompositeImplicitAutogradNestedTensor: randn_like
autogen: randn_like.out
- func: randperm(SymInt n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: [core, nondeterministic_seeded]
dispatch:
CompositeExplicitAutograd: randperm
- func: randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randperm
- func: randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: randperm_out
- func: randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
dispatch:
CPU: randperm_out_cpu
CUDA: randperm_out_cuda
MPS: randperm_out_mps
- func: range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: range
- func: range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: range
- func: range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: range_out_no_step
- func: range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, Meta: range_out
CUDA: range_cuda_out
MPS: range_mps_out
cpp_no_default_args: ['step']
- func: ravel(Tensor(a) self) -> Tensor(a)
variants: function, method
- func: reciprocal(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: reciprocal.out
variants: function, method
tags: [core, pointwise]
- func: reciprocal_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: reciprocal.out
variants: function, method
tags: pointwise
- func: reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: reciprocal_out
MPS: reciprocal_out_mps
tags: pointwise
- func: neg(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: neg.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: neg_sparse
SparseCsrCPU, SparseCsrCUDA: neg_sparse_csr
NestedTensorCPU, NestedTensorCUDA: NestedTensor_neg
tags: [core, pointwise]
- func: neg_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: neg.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: neg_sparse_
SparseCsrCPU, SparseCsrCUDA: neg_sparse_csr_
NestedTensorCPU, NestedTensorCUDA: NestedTensor_neg_
tags: pointwise
- func: neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: neg_out
MPS: neg_out_mps
SparseCPU, SparseCUDA: neg_out_sparse
SparseCsrCPU, SparseCsrCUDA: neg_sparse_csr_out
tags: pointwise
# Alias for neg
- func: negative(Tensor self) -> Tensor
variants: function, method
- func: negative_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
- func: negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: repeat(Tensor self, SymInt[] repeats) -> Tensor
variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.
dispatch:
CompositeExplicitAutograd: repeat
MPS: repeat_mps
autogen: repeat.out
tags: core
- func: repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor
variants: function
dispatch:
CPU: repeat_interleave_cpu
CUDA: repeat_interleave_cuda
MPS: repeat_interleave_mps
tags: dynamic_output_shape
autogen: repeat_interleave.Tensor_out
- func: repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor
variants: function, method
dispatch:
CompositeImplicitAutograd: repeat_interleave_symint
- func: repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor
variants: function, method
dispatch:
CompositeImplicitAutograd: repeat_interleave_symint
- func: reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeImplicitAutograd: reshape_symint
CompositeImplicitAutogradNestedTensor: reshape_nested_symint
- func: _reshape_copy(Tensor self, SymInt[] size) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: _reshape_copy_symint
# NOTE [ _reshape_alias ] is meant to be used in the implementation of reshape.
# It is not user-facing, hence the leading underscore. Please don't use it
# anywhere else.
- func: _reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CPU, CUDA, Meta, QuantizedCPU, QuantizedCUDA, ZeroTensor, MPS: _reshape_alias
# We don't need to support mkldnn since this is handled explicitly by the reshape operator.
- func: _mkldnn_reshape(Tensor self, int[] shape) -> Tensor
device_check: NoCheck
device_guard: False
dispatch:
MkldnnCPU: mkldnn_reshape
autogen: _mkldnn_reshape.out
- func: reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)
variants: method
device_check: NoCheck
device_guard: False
dispatch:
CompositeImplicitAutograd: reshape_as
CompositeImplicitAutogradNestedTensor: reshape_as_nested
- func: round(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: round.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: round_sparse
SparseCsrCPU, SparseCsrCUDA: round_sparse_csr
tags: [core, pointwise]
- func: round_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: round.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: round_sparse_
SparseCsrCPU, SparseCsrCUDA: round_sparse_csr_
tags: pointwise
- func: round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU: round_out
CUDA: round_out
MPS: round_out_mps
SparseCPU, SparseCUDA: round_sparse_out
SparseCsrCPU, SparseCsrCUDA: round_sparse_csr_out
tags: pointwise
- func: round.decimals(Tensor self, *, int decimals) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: round.decimals_out
variants: function, method
tags: pointwise
- func: round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: round.decimals_out
variants: function, method
tags: pointwise
- func: round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU: round_decimals_out
CUDA: round_decimals_out
tags: pointwise
- func: rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
device_check: NoCheck # TensorIterator
tags: nondeterministic_seeded
- func: rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
tags: nondeterministic_seeded
device_check: NoCheck # TensorIterator
- func: relu(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: relu
MPS: relu_mps
MkldnnCPU: mkldnn_relu
QuantizedCPU: relu_quantized_cpu
QuantizedCUDA: relu_quantized_cuda
NestedTensorCPU, NestedTensorCUDA: NestedTensor_relu
SparseCPU, SparseCUDA: relu_sparse
SparseCsrCPU, SparseCsrCUDA: relu_sparse_csr
tags: [core, pointwise]
- func: relu_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: relu_
MPS: relu_mps_
MkldnnCPU: mkldnn_relu_
QuantizedCPU: relu_quantized_cpu_
QuantizedCUDA: relu_quantized_cuda_
NestedTensorCPU, NestedTensorCUDA: NestedTensor_relu_
SparseCPU, SparseCUDA: relu_sparse_
SparseCsrCPU, SparseCsrCUDA: relu_sparse_csr_
autogen: relu.out
tags: pointwise
- func: relu6(Tensor self) -> Tensor
python_module: nn
- func: relu6_(Tensor(a!) self) -> Tensor(a!)
python_module: nn
- func: prelu(Tensor self, Tensor weight) -> Tensor
variants: function, method
autogen: prelu.out
- func: _prelu_kernel(Tensor self, Tensor weight) -> Tensor
dispatch:
CPU, CUDA: _prelu_kernel
QuantizedCPU: _prelu_kernel_quantized_cpu
MkldnnCPU: mkldnn_prelu
MPS: prelu_mps
- func: _prelu_kernel_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)
dispatch:
CPU, CUDA: _prelu_kernel_backward
MkldnnCPU: mkldnn_prelu_backward
MPS: prelu_backward_mps
- func: gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
CPU: gelu_out_cpu
CUDA: gelu_out_cuda
MPS: gelu_out_mps
- func: gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!)
structured_delegate: gelu.out
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
QuantizedCPU: gelu_quantized_cpu_
NestedTensorCPU, NestedTensorCUDA: NestedTensor_gelu_
- func: gelu(Tensor self, *, str approximate='none') -> Tensor
structured_delegate: gelu.out
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
MkldnnCPU: mkldnn_gelu
QuantizedCPU: gelu_quantized_cpu
QuantizedCUDA: gelu_quantized_cuda
NestedTensorCPU, NestedTensorCUDA: NestedTensor_gelu
tags: [core, pointwise]
- func: gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
python_module: nn
dispatch:
CPU: gelu_backward_out_cpu
CUDA: gelu_backward_out_cuda
MPS: gelu_backward_out_mps
- func: gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor
structured_delegate: gelu_backward.grad_input
python_module: nn
dispatch:
MkldnnCPU: mkldnn_gelu_backward
NestedTensorCPU, NestedTensorCUDA: gelu_backwards_nested
tags: pointwise
- func: infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor
variants: function
python_module: nn
device_check: NoCheck
device_guard: False
- func: hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: hardshrink_out
- func: hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor
structured_delegate: hardshrink.out
device_check: NoCheck # TensorIterator
variants: function, method
- func: hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: hardshrink_backward_out
- func: hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor
structured_delegate: hardshrink_backward.grad_input
variants: function, method
- func: rsqrt(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: rsqrt.out
variants: function, method
tags: [core, pointwise]
- func: rsqrt_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: rsqrt.out
variants: function, method
tags: pointwise
- func: rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: rsqrt_out
MPS: rsqrt_out_mps
tags: pointwise
- func: select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
- func: select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: select_symint
SparseCsrCPU, SparseCsrCUDA: select_sparse_csr
NestedTensorCPU, NestedTensorCUDA: select_nested
tags: core
- func: select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutogradNonFunctional: select_backward_symint
autogen: select_backward.out
- func: _nested_select_backward(Tensor grad_output, Tensor self, int dim, SymInt index) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
dispatch:
NestedTensorCPU, NestedTensorCUDA: _nested_select_backward_symint
- func: selu(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
- func: selu_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
- func: celu(Tensor self, Scalar alpha=1.0) -> Tensor
device_check: NoCheck # TensorIterator
dispatch:
CompositeExplicitAutograd: celu
- func: celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CompositeExplicitAutograd: celu_
autogen: celu.out
- func: silu(Tensor self) -> Tensor
structured_delegate: silu.out
python_module: nn
dispatch:
NestedTensorCPU, NestedTensorCUDA: NestedTensor_silu
tags: pointwise
- func: silu_(Tensor(a!) self) -> Tensor(a!)
structured_delegate: silu.out
python_module: nn
dispatch:
NestedTensorCPU, NestedTensorCUDA: NestedTensor_silu_
tags: pointwise
- func: silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
python_module: nn
dispatch:
CPU, CUDA: silu_out
MPS: silu_out_mps
tags: pointwise
- func: silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
python_module: nn
dispatch:
CPU, CUDA: silu_backward_out
MPS: silu_backward_out_mps
tags: pointwise
- func: silu_backward(Tensor grad_output, Tensor self) -> Tensor
structured_delegate: silu_backward.grad_input
python_module: nn
dispatch:
CompositeImplicitAutograd: math_silu_backward
NestedTensorCPU, NestedTensorCUDA: silu_backward_nested
tags: pointwise
- func: mish(Tensor self) -> Tensor
structured_delegate: mish.out
python_module: nn
- func: mish_(Tensor(a!) self) -> Tensor(a!)
structured_delegate: mish.out
python_module: nn
- func: mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
python_module: nn
dispatch:
CPU, CUDA: mish_out
MPS: mish_out_mps
- func: mish_backward(Tensor grad_output, Tensor self) -> Tensor
python_module: nn
dispatch:
CPU, CUDA: mish_backward
MPS: mish_backward_mps
CompositeImplicitAutograd: math_mish_backward
- func: sigmoid(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: sigmoid.out
variants: function, method
dispatch:
QuantizedCPU: sigmoid_quantized_cpu
MkldnnCPU: mkldnn_sigmoid
tags: [core, pointwise]
- func: sigmoid_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: sigmoid.out
variants: function, method
dispatch:
MkldnnCPU: mkldnn_sigmoid_
tags: pointwise
- func: sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: sigmoid_out
MPS: sigmoid_out_mps
tags: pointwise
- func: logit(Tensor self, float? eps=None) -> Tensor
variants: function, method
dispatch:
CPU, CUDA: logit
MPS: logit_mps
tags: pointwise
- func: logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)
variants: function, method
dispatch:
CPU, CUDA: logit_
tags: pointwise
- func: logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: logit_out
MPS: logit_out_mps
tags: pointwise
- func: sin(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: sin.out
variants: function, method
dispatch:
SparseCsrCPU, SparseCsrCUDA: sin_sparse_csr
SparseCPU, SparseCUDA: sin_sparse
NestedTensorCPU, NestedTensorCUDA: sin_nested
tags: [core, pointwise]
- func: sin_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: sin.out
variants: function, method
dispatch:
SparseCsrCPU, SparseCsrCUDA: sin_sparse_csr_
SparseCPU, SparseCUDA: sin_sparse_
tags: pointwise
- func: sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: sin_out
MPS: sin_out_mps
SparseCsrCPU, SparseCsrCUDA: sin_sparse_csr_out
SparseCPU, SparseCUDA: sin_sparse_out
tags: pointwise
- func: sinc(Tensor self) -> Tensor
structured_delegate: sinc.out
variants: function, method
tags: pointwise
- func: sinc_(Tensor(a!) self) -> Tensor(a!)
structured_delegate: sinc.out
variants: function, method
tags: pointwise
- func: sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: sinc_out
tags: pointwise
- func: sinh(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: sinh.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: sinh_sparse
SparseCsrCPU, SparseCsrCUDA: sinh_sparse_csr
tags: [core, pointwise]
- func: sinh_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: sinh.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: sinh_sparse_
SparseCsrCPU, SparseCsrCUDA: sinh_sparse_csr_
tags: pointwise
- func: sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: sinh_out
MPS: sinh_out_mps
SparseCPU, SparseCUDA: sinh_sparse_out
SparseCsrCPU, SparseCsrCUDA: sinh_sparse_csr_out
# Returns a copy of this `Variable` that is detached from its autograd graph.
# This method is OK to call if the `Variable` is a view.
#
# NOTE: Previously, if the tensor metadata (e.g. sizes / strides / storage /
# storage_offset) of a tensor created from `detach()` was changed, that
# metadata in the original tensor would also be updated. The new behavior is
# that metadata changes to the detached tensor no longer propagate to the
# original tensor. To make such changes explicitly illegal, `detach()` sets
# `allow_tensor_metadata_change_` to false, preventing users from changing the
# metadata of the detached tensor and expecting the original tensor to be
# updated as well.
tags: pointwise
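#
# A minimal sketch of the current behavior (hypothetical snippet; the exact
# error message may differ across releases):
#
#   x = torch.randn(2, 3)
#   y = x.detach()
#   y.resize_(6)   # raises: metadata changes on a tensor created from detach() are not allowed
#   x.add_(1)      # in-place data changes are still visible through y, since y shares x's storage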
- func: detach(Tensor(a) self) -> Tensor(a)
variants: function, method
dispatch:
CompositeExplicitAutograd: detach
NestedTensorCPU, NestedTensorCUDA: detach
# Like `detach()`, but modifies this `Variable` in-place. This method may only
# be called on non-view `Variable`s; you can use `is_view()` to check this.
# If this `Variable` is a view, it throws a `std::runtime_error`.
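#
# A small sketch of the view restriction (hypothetical snippet; the exact error
# text may vary):
#
#   base = torch.randn(4)
#   base.detach_()      # OK: base is not a view
#   view = base[:2]
#   view.detach_()      # raises: views cannot be detached in-place; use detach() instead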
- func: detach_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
tags: inplace_view
dispatch:
CompositeExplicitAutograd: detach_
- func: size.int(Tensor self, int dim) -> int
variants: function
device_check: NoCheck
device_guard: False
manual_cpp_binding: True
- func: size.Dimname(Tensor self, Dimname dim) -> int
variants: function, method
device_check: NoCheck
device_guard: False
- func: sym_size.int(Tensor self, int dim) -> SymInt
variants: function
device_check: NoCheck
device_guard: False
tags: core
manual_cpp_binding: True
- func: sym_numel(Tensor self) -> SymInt
variants: function
device_check: NoCheck
device_guard: False
tags: core
manual_cpp_binding: True
- func: sym_storage_offset(Tensor self) -> SymInt
variants: function
device_check: NoCheck
device_guard: False
tags: core
manual_cpp_binding: True
- func: slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: slice
tags: core
# NOTE: The implementation of split_with_sizes bypasses the dispatcher to call this; undo
# that if adding specific implementations here!
- func: slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: slice_backward
autogen: slice_backward.out
# NB: This op exists to back the implementation of reverse view_funcs for various views (chunk,
# slice.Tensor, split_with_sizes, et al.). Currently, these are only used during fake-ification
# of PT2 graph input subclass instances that are views. This means:
# * This op shouldn't really show up in eager mode (so e.g. XLA shouldn't have to implement it)
# * This op shouldn't show up in a PT2 graph (so a PT2 backend shouldn't have to implement it)
# * A subclass will have to implement this to work in PT2 if a subclass view is used as a graph
# input AND the view utilizes this op in its inverse. The idea is that slice_inverse() is
# easier to implement for a subclass than as_strided()
- func: slice_inverse(Tensor(a) self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: slice_inverse_symint
- func: slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutogradNonFunctional: slice_scatter
autogen: slice_scatter.out
tags: [core, view_copy]
- func: select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutogradNonFunctional: select_scatter_symint
autogen: select_scatter.out
tags: core
- func: diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutogradNonFunctional: diagonal_scatter
autogen: diagonal_scatter.out
- func: as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutogradNonFunctional: as_strided_scatter_symint
autogen: as_strided_scatter.out
- func: smm(Tensor self, Tensor mat2) -> Tensor
variants: function, method
# softmax allows a positional dtype, unlike most operators, because making it keyword-only would be BC-breaking when loading jit models.
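# For example (a sketch of the user-facing call; the keyword form works as well):
#
#   x = torch.randn(3, 5)
#   y = torch.softmax(x, 1, torch.float64)            # dtype passed positionally
#   y = torch.softmax(x, dim=1, dtype=torch.float64)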
- func: softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
variants: function, method
- func: softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
variants: function
dispatch:
CompositeExplicitAutograd: softmax_out
- func: softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
variants: function, method
- func: _softmax(Tensor self, int dim, bool half_to_float) -> Tensor
structured_delegate: _softmax.out
dispatch:
MkldnnCPU: mkldnn_softmax
NestedTensorCPU, NestedTensorCUDA: softmax_nested
tags: core
- func: _softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU: softmax_cpu_out
CUDA: softmax_cuda_out
MPS: softmax_mps_out
- func: _softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
structured_delegate: _softmax_backward_data.out
dispatch:
NestedTensorCPU, NestedTensorCUDA: nested_softmax_backward
- func: _softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!)
structured: True
dispatch:
CPU: softmax_backward_cpu_out
CUDA: softmax_backward_cuda_out
MPS: softmax_backward_mps_out
- func: unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: unsafe_split
autogen: unsafe_split.Tensor_out
- func: split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: split
- func: split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]
variants: function, method
device_guard: False
dispatch:
CompositeImplicitAutograd: split_symint
- func: unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: unsafe_split_with_sizes
autogen: unsafe_split_with_sizes.out
- func: split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: split_with_sizes
NestedTensorCPU, NestedTensorCUDA: split_with_sizes_nested
tags: core
- func: hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
variants: function, method
- func: hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
variants: function, method
- func: vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
variants: function, method
- func: vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
variants: function, method
- func: dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
variants: function, method
- func: dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
variants: function, method
- func: squeeze(Tensor(a) self) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: squeeze
QuantizedCPU, QuantizedCUDA: squeeze_quantized
NestedTensorCPU, NestedTensorCUDA: squeeze_nested
- func: squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: squeeze
QuantizedCPU, QuantizedCUDA: squeeze_quantized
NestedTensorCPU, NestedTensorCUDA: squeeze_dim_nested
tags: core
- func: squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
- func: squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: squeeze
QuantizedCPU, QuantizedCUDA: squeeze_quantized
NestedTensorCPU, NestedTensorCUDA: squeeze_dim_nested
tags: core
- func: squeeze_(Tensor(a!) self) -> Tensor(a!)
variants: method
device_check: NoCheck
device_guard: False
tags: inplace_view
dispatch:
CompositeExplicitAutograd: squeeze_
- func: squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)
variants: method
device_check: NoCheck
device_guard: False
tags: inplace_view
dispatch:
CompositeExplicitAutograd: squeeze_
- func: squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!)
variants: method
device_check: NoCheck
device_guard: False
tags: inplace_view
dispatch:
CompositeExplicitAutograd: squeeze_
- func: squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)
variants: method
device_check: NoCheck
device_guard: False
tags: inplace_view
- func: sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
variants: function, method
- func: sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU: _sspaddmm_out_only_sparse
CUDA: _sspaddmm_out_only_sparse_cuda
SparseCPU: _sspaddmm_out_cpu
SparseCUDA: _sspaddmm_out_cuda
- func: _chunk_cat(Tensor[] tensors, int dim, int num_chunks) -> Tensor
dispatch:
CompositeExplicitAutograd: _chunk_cat
- func: _chunk_cat.out(Tensor[] tensors, int dim, int num_chunks, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: _chunk_cat_out
- func: stack(Tensor[] tensors, int dim=0) -> Tensor
dispatch:
CompositeExplicitAutograd: stack
- func: stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: stack_out
- func: _stack(Tensor[] tensors, int dim=0) -> Tensor
dispatch: # match the backends supported by _cat
CPU: _stack_cpu
CompositeExplicitAutograd: _stack
- func: _stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
dispatch: # match the backends supported by _cat_out
CPU: _stack_out_cpu
CompositeExplicitAutograd: _stack_out
- func: hstack(Tensor[] tensors) -> Tensor
- func: hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
- func: vstack(Tensor[] tensors) -> Tensor
- func: vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
- func: dstack(Tensor[] tensors) -> Tensor
- func: dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
# Overload without center & pad mode, needed for forward-compatibility
- func: stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
variants: function, method
cpp_no_default_args: ['hop_length', 'win_length', 'window', 'normalized']
- func: stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
variants: function, method
- func: istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor
variants: function, method
- func: stride.int(Tensor self, int dim) -> int
variants: function
device_check: NoCheck
device_guard: False
manual_cpp_binding: True
- func: stride.Dimname(Tensor self, Dimname dim) -> int
variants: function, method
device_check: NoCheck
device_guard: False
- func: sym_stride.int(Tensor self, int dim) -> SymInt
variants: function
device_check: NoCheck
device_guard: False
tags: core
manual_cpp_binding: True
- func: sum(Tensor self, *, ScalarType? dtype=None) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: sum
SparseCPU, SparseCUDA: sum_coo
SparseCsrCPU, SparseCsrCUDA: sum_csr
autogen: sum.out
- func: sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
# TODO: Align the signature of sum.dim_IntList and _sparse_csr_sum.dim_dtype
structured_delegate: sum.IntList_out
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
NestedTensorCPU: NestedTensor_sum_dim_CPU
SparseCPU, SparseCUDA: sum_sparse_coo
SparseCsrCPU, SparseCsrCUDA: sum_sparse_compressed
tags: core
- func: sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
- func: sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
structured: True
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: sum_out
MPS: sum_out_mps
- func: sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
# TODO: this function will be replaced once nested expand semantics have been settled on
- func: _nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor
dispatch:
NestedTensorCPU: _nested_sum_backward_cpu
- func: nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
variants: function, method
dispatch:
CPU, CUDA: nansum
MPS: nansum_mps
- func: nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: nansum_out
MPS: nansum_out_mps
- func: sum_to_size(Tensor self, SymInt[] size) -> Tensor
variants: method
device_check: NoCheck
device_guard: False
dispatch:
CompositeImplicitAutograd: sum_to_size_symint
- func: sqrt(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: sqrt.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: sqrt_sparse
SparseCsrCPU, SparseCsrCUDA: sqrt_sparse_csr
tags: [core, pointwise]
- func: sqrt_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: sqrt.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: sqrt_sparse_
SparseCsrCPU, SparseCsrCUDA: sqrt_sparse_csr_
tags: pointwise
- func: sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: sqrt_out
MPS: sqrt_out_mps
SparseCPU, SparseCUDA: sqrt_sparse_out
SparseCsrCPU, SparseCsrCUDA: sqrt_sparse_csr_out
tags: pointwise
- func: square(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
tags: pointwise
- func: square_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function, method
tags: pointwise
- func: square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
tags: pointwise
- func: std(Tensor self, bool unbiased=True) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
cpp_no_default_args: ["unbiased"]
- func: std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
cpp_no_default_args: ["unbiased"]
- func: std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: std
MPS: std_mps
QuantizedCPU: std_quantized_cpu
- func: std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
cpp_no_default_args: ["unbiased"]
- func: std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
cpp_no_default_args: ["unbiased"]
- func: std_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CPU, CUDA: std_mean
MPS: std_mean_mps
autogen: std_mean.correction_out
- func: std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
cpp_no_default_args: ["unbiased"]
- func: std_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
- func: std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
cpp_no_default_args: ["unbiased"]
- func: std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: std_out
QuantizedCPU: std_out_quantized_cpu
- func: std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
cpp_no_default_args: ["unbiased"]
- func: std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
cpp_no_default_args: ["unbiased"]
- func: std.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
- func: std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
- func: prod(Tensor self, *, ScalarType? dtype=None) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: prod
MPS: prod_mps
autogen: prod.out
tags: core
- func: prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
structured_delegate: prod.int_out
device_check: NoCheck # TensorIterator
variants: function, method
tags: core
- func: prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
structured: True
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: prod_out
MPS: prod_out_mps
- func: prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
- func: prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
- func: t(Tensor(a) self) -> Tensor(a)
device_check: NoCheck
device_guard: False
variants: function, method
dispatch:
CompositeExplicitAutograd: t
- func: t_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck
device_guard: False
variants: method
tags: inplace_view
dispatch:
CompositeExplicitAutograd: t_
- func: tan(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: tan.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: tan_sparse
SparseCsrCPU, SparseCsrCUDA: tan_sparse_csr
tags: [core, pointwise]
- func: tan_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: tan.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: tan_sparse_
SparseCsrCPU, SparseCsrCUDA: tan_sparse_csr_
tags: pointwise
- func: tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: tan_out
MPS: tan_out_mps
SparseCPU, SparseCUDA: tan_sparse_out
SparseCsrCPU, SparseCsrCUDA: tan_sparse_csr_out
tags: pointwise
- func: tanh(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: tanh.out
variants: function, method
dispatch:
QuantizedCPU: tanh_quantized_cpu
MkldnnCPU: mkldnn_tanh
SparseCPU, SparseCUDA: tanh_sparse
SparseCsrCPU, SparseCsrCUDA: tanh_sparse_csr
NestedTensorCPU, NestedTensorCUDA: NestedTensor_tanh
tags: [core, pointwise]
- func: tanh_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: tanh.out
variants: function, method
dispatch:
MkldnnCPU: mkldnn_tanh_
SparseCPU, SparseCUDA: tanh_sparse_
SparseCsrCPU, SparseCsrCUDA: tanh_sparse_csr_
NestedTensorCPU, NestedTensorCUDA: NestedTensor_tanh_
tags: pointwise
- func: tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: tanh_out
MPS: tanh_out_mps
SparseCPU, SparseCUDA: tanh_sparse_out
SparseCsrCPU, SparseCsrCUDA: tanh_sparse_csr_out
tags: pointwise
- func: tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor
variants: function
- func: tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)
variants: function
# TODO: namespace threshold in 'nn'
- func: threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
structured_delegate: threshold.out
dispatch:
QuantizedCPU: threshold_quantized_cpu
- func: threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
structured_delegate: threshold.out
- func: threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: threshold_out
MPS: threshold_out_mps
- func: threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: threshold_backward_out
MPS: threshold_backward_out_mps
SparseCPU, SparseCUDA: threshold_backward_sparse_out
SparseCsrCPU, SparseCsrCUDA: threshold_backward_sparse_compressed_out
- func: threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor
variants: function
structured_delegate: threshold_backward.grad_input
dispatch:
MkldnnCPU: mkldnn_relu_backward
SparseCPU, SparseCUDA: threshold_backward_sparse
SparseCsrCPU, SparseCsrCUDA: threshold_backward_sparse_compressed
NestedTensorCPU, NestedTensorCUDA: threshold_backwards_nested
tags: pointwise
- func: tile(Tensor self, SymInt[] dims) -> Tensor
variants: function, method
dispatch:
CompositeImplicitAutograd: tile_symint
- func: transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: transpose
NestedTensorCPU, NestedTensorCUDA: transpose_nested
- func: transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
- func: _mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor
device_check: NoCheck
device_guard: False
dispatch:
MkldnnCPU: mkldnn_transpose
- func: transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
variants: method
device_check: NoCheck
device_guard: False
tags: inplace_view
dispatch:
CompositeExplicitAutograd: transpose_
- func: _mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
device_check: NoCheck
device_guard: False
dispatch:
MkldnnCPU: mkldnn_transpose_
autogen: _mkldnn_transpose.out
- func: one_hot(Tensor self, int num_classes=-1) -> Tensor
python_module: nn
variants: function
tags: dynamic_output_shape
- func: flip(Tensor self, int[] dims) -> Tensor
variants: function, method
dispatch:
CPU, QuantizedCPU, CUDA, QuantizedCUDA: flip
MPS: flip_mps
autogen: flip.out
tags: core
- func: fliplr(Tensor self) -> Tensor
variants: function, method
- func: flipud(Tensor self) -> Tensor
variants: function, method
- func: roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor
variants: function, method
dispatch:
CPU, MPS: roll
CUDA: roll_cuda
autogen: roll.out
# The default int[] value [0,1] must not contain a space after the comma, since the codegen parser uses ', ' to split args.
- func: rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: rot90
autogen: rot90.out
- func: trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
- func: trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
- func: trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
- func: trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor
# Fused implementation detail for transformers. Adds in-projection bias to QKV and divides Q by sqrt(D/num_heads).
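# Roughly equivalent to the following sketch (non-authoritative; the real kernel
# is fused and also handles nested/padded inputs), with qkv of shape [B, T, 3*D]
# and head_dim = D // num_heads:
#
#   q, k, v = (qkv + qkv_bias).chunk(3, dim=-1)
#   q = q / math.sqrt(D // num_heads)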
- func: _transform_bias_rescale_qkv(Tensor qkv, Tensor qkv_bias, int num_heads) -> (Tensor, Tensor, Tensor)
dispatch:
CPU, NestedTensorCPU: transform_bias_rescale_qkv_cpu
CUDA, NestedTensorCUDA: transform_bias_rescale_qkv_cuda
autogen: _transform_bias_rescale_qkv.out
- func: _nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor
dispatch:
CPU, CUDA: NestedTensor_nested_tensor_from_mask
autogen: _nested_tensor_from_mask.out
- func: _nested_tensor_from_mask_left_aligned(Tensor t, Tensor mask) -> bool
dispatch:
CPU, CUDA: NestedTensor_nested_tensor_from_mask_left_aligned
- func: _nested_from_padded(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False) -> Tensor
device_check: NoCheck # cpu_nested_shape_example will always be on CPU
dispatch:
CPU: nested_from_padded_generic
CUDA: nested_from_padded_cuda
autogen: _nested_from_padded.out
# These private functions are temporary. They will be updated/deleted when nested tensors switch to using SymInts for their metadata representation
- func: _nested_tensor_size(Tensor self) -> Tensor
variants: method
dispatch:
NestedTensorCPU, NestedTensorCUDA: _nested_tensor_size
autogen: _nested_tensor_size.out
- func: _nested_tensor_strides(Tensor self) -> Tensor
variants: method
dispatch:
NestedTensorCPU, NestedTensorCUDA: _nested_tensor_strides
autogen: _nested_tensor_strides.out
- func: _nested_tensor_storage_offsets(Tensor self) -> Tensor
variants: method
dispatch:
NestedTensorCPU, NestedTensorCUDA, NestedTensorMeta: _nested_tensor_storage_offsets
autogen: _nested_tensor_storage_offsets.out
# _nested_from_padded is not usable from Python, so
# _nested_from_padded_and_nested_example is available for testing.
- func: _nested_from_padded_and_nested_example(Tensor padded, Tensor nt_example) -> Tensor
dispatch:
NestedTensorCPU, NestedTensorCUDA: NestedTensor_from_padded_and_nested_example
autogen: _nested_from_padded_and_nested_example.out
# The types of this function's input arguments are temporary. They will need to
# be updated when nested tensors switch to using SymInts for their metadata representation.
- func: _nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, Tensor offsets) -> Tensor(a)
variants: function
device_check: NoCheck
dispatch:
CPU, CUDA: _nested_view_from_buffer
- func: _nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets) -> Tensor
variants: function
device_check: NoCheck
tags: view_copy
dispatch:
CompositeExplicitAutogradNonFunctional: _nested_view_from_buffer_copy
autogen: _nested_view_from_buffer_copy.out
- func: _trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor
dispatch:
# calls unsqueeze
CompositeExplicitAutogradNonFunctional: _trilinear
autogen: _trilinear.out
- func: triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor
- func: trunc(Tensor self) -> Tensor
structured_delegate: trunc.out
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
SparseCPU, SparseCUDA: trunc_sparse
SparseCsrCPU, SparseCsrCUDA: trunc_sparse_csr
tags: [core, pointwise]
- func: trunc_(Tensor(a!) self) -> Tensor(a!)
structured_delegate: trunc.out
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
SparseCPU, SparseCUDA: trunc_sparse_
SparseCsrCPU, SparseCsrCUDA: trunc_sparse_csr_
tags: pointwise
- func: trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: trunc_out
MPS: trunc_out_mps
SparseCPU, SparseCUDA: trunc_sparse_out
SparseCsrCPU, SparseCsrCUDA: trunc_sparse_csr_out
tags: pointwise
# Alias for trunc
- func: fix(Tensor self) -> Tensor
variants: function, method
- func: fix_(Tensor(a!) self) -> Tensor(a!)
variants: function, method
- func: fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: type_as(Tensor self, Tensor other) -> Tensor
variants: method
- func: _has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool
variants: function
- func: _unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)
variants: function
dispatch:
CPU: _unique_cpu
CUDA: _unique_cuda
autogen: _unique.out
- func: unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
variants: function
dispatch:
CPU: unique_dim_cpu
CUDA: unique_dim_cuda
tags: dynamic_output_shape
autogen: unique_dim.out
- func: unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)
variants: function
dispatch:
CPU: unique_consecutive_cpu
CUDA: unique_consecutive_cuda
MPS: unique_consecutive_mps
tags: dynamic_output_shape
autogen: unique_consecutive.out
- func: unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
variants: function
dispatch:
CPU: unique_dim_consecutive_cpu
CUDA: unique_dim_consecutive_cuda
MPS: unique_dim_consecutive_mps
tags: dynamic_output_shape
autogen: unique_dim_consecutive.out
# _unique and _unique_dim are fragile, and modifying them can easily cause internal breakage.
# The operator below is a temporary hack for adding return_counts support.
# Please don't rely on these two operators; they will be removed soon.
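# For user code, the supported entry point is torch.unique, e.g.:
#
#   values, counts = torch.unique(torch.tensor([1, 1, 2, 3]), return_counts=True)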
- func: _unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
variants: function
dispatch:
CPU: _unique2_cpu
CUDA: _unique2_cuda
MPS: _unique2_mps
tags: dynamic_output_shape
autogen: _unique2.out
- func: _unsafe_view(Tensor self, SymInt[] size) -> Tensor
dispatch:
CompositeExplicitAutograd: _unsafe_view
autogen: _unsafe_view.out
- func: unsqueeze(Tensor(a) self, int dim) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: unsqueeze
SparseCPU, SparseCUDA: unsqueeze_sparse
QuantizedCPU, QuantizedCUDA: unsqueeze_quantized
NestedTensorCPU, NestedTensorCUDA: unsqueeze_nested
tags: core
- func: unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)
variants: method
device_check: NoCheck
device_guard: False
tags: inplace_view
dispatch:
CompositeExplicitAutograd: unsqueeze_
- func: vander(Tensor x, int? N=None, bool increasing=False) -> Tensor
- func: var(Tensor self, bool unbiased=True) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
cpp_no_default_args: ["unbiased"]
- func: var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
tags: core
cpp_no_default_args: ["unbiased"]
- func: var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA: var
MPS: var_mps
tags: core
- func: var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
cpp_no_default_args: ["unbiased"]
- func: var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: var_out
- func: var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
cpp_no_default_args: ["unbiased"]
- func: var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
cpp_no_default_args: ["unbiased"]
- func: var.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
- func: var.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
- func: var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
cpp_no_default_args: ["unbiased"]
- func: var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
cpp_no_default_args: ["unbiased"]
- func: var_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CPU, CUDA: var_mean
MPS: var_mean_mps
autogen: var_mean.correction_out
- func: var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
cpp_no_default_args: ["unbiased"]
- func: var_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
device_check: NoCheck # TensorIterator
variants: function
- func: view_as(Tensor(a) self, Tensor other) -> Tensor(a)
variants: method
device_check: NoCheck
device_guard: False
- func: where.self(Tensor condition, Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CPU, CUDA, MPS: where
tags: [core, pointwise]
- func: where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA, MPS: where_self_out
- func: where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor
variants: function
- func: where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor
variants: function, method
- func: where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor
variants: function
- func: where(Tensor condition) -> Tensor[]
device_check: NoCheck # TensorIterator
variants: function
- func: norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor
variants: function
# VariableType::_weight_norm should not introduce a gap in the autograd graph,
# so we don't define "dispatch" variants for it.
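# Conceptually, _weight_norm computes something like the following sketch
# (non-authoritative; the composite may instead call the fused
# _weight_norm_interface kernel when it can):
#
#   w = v * (g / torch.norm_except_dim(v, 2, dim))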
- func: _weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor
variants: function
- func: _weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)
variants: function
dispatch:
CPU: weight_norm_cpu
CUDA: weight_norm_cuda
MPS: weight_norm_mps
autogen: _weight_norm_interface.out
- func: _weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
variants: function
dispatch:
CPU: weight_norm_backward_cpu
CUDA: weight_norm_backward_cuda
MPS: weight_norm_backward_mps
autogen: _weight_norm_interface_backward.out
- func: _weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
variants: function
- func: zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: zeros
autogen: zeros.names_out
- func: _efficientzerotensor(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CPU: _efficientzerotensor
CUDA: _efficientzerotensor_cuda
MPS: _efficientzerotensor_mps
Meta: _efficientzerotensor_meta
autogen: _efficientzerotensor.out
- func: zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: zeros_symint
- func: zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: zeros_out
SparseCPU, SparseCUDA, SparseMeta: zeros_sparse_out
- func: zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
dispatch:
# NB: Although this composite mutates on the inside, it is
# non-differentiable so NonFunctional doesn't apply
CompositeExplicitAutograd, CompositeImplicitAutogradNestedTensor: zeros_like
autogen: zeros_like.out
- func: _standard_gamma_grad(Tensor self, Tensor output) -> Tensor
variants: function
dispatch:
CPU: _standard_gamma_grad_cpu
CUDA: _standard_gamma_grad_cuda
autogen: _standard_gamma_grad.out
- func: _standard_gamma(Tensor self, Generator? generator=None) -> Tensor
variants: function
dispatch:
CPU: _s_gamma_cpu
CUDA: _s_gamma_cuda
tags: nondeterministic_seeded
autogen: _standard_gamma.out
- func: _dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor
dispatch:
CPU: _dirichlet_grad_cpu
CUDA: _dirichlet_grad_cuda
autogen: _dirichlet_grad.out
- func: _sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor
tags: nondeterministic_seeded
variants: function
dispatch:
CPU: _s_dirichlet_cpu
CUDA: _s_dirichlet_cuda
autogen: _sample_dirichlet.out
- func: poisson(Tensor self, Generator? generator=None) -> Tensor
device_check: NoCheck # TensorIterator
dispatch:
CPU: _s_poisson_cpu
CUDA: _s_poisson_cuda
tags: nondeterministic_seeded
autogen: poisson.out
- func: binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor
device_check: NoCheck # TensorIterator
dispatch:
CPU: _s_binomial_cpu
CUDA: _s_binomial_cuda
tags: nondeterministic_seeded
autogen: binomial.out
# When more variants get ported to native, this dispatch will get more
# complicated
- func: native_norm(Tensor self, Scalar p=2) -> Tensor
dispatch:
SparseCPU, SparseCUDA: norm_sparse
autogen: native_norm.out
- func: native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype) -> Tensor
dispatch:
SparseCPU, SparseCUDA: norm_sparse
autogen: native_norm.ScalarOpt_dim_dtype_out
- func: _batch_norm_with_update(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor)
dispatch:
CPU: _batch_norm_with_update_cpu
CUDA: _batch_norm_with_update_cuda
MPS: _batch_norm_with_update_mps
MkldnnCPU: _batch_norm_with_update_mkldnn
autogen: _batch_norm_with_update_functional
- func: _batch_norm_with_update.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd, Tensor(g!) reserve) -> (Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))
dispatch:
CPU: _batch_norm_with_update_cpu_out
CUDA: _batch_norm_with_update_cuda_out
MPS: _batch_norm_with_update_mps_out
- func: _batch_norm_no_update(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor)
dispatch:
CompositeExplicitAutograd: _batch_norm_no_update
autogen: _batch_norm_no_update.out
- func: batch_norm_backward(Tensor grad_out, Tensor input, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, bool update, float eps, bool[3] output_mask, Tensor reserve) -> (Tensor, Tensor, Tensor)
dispatch:
CPU: _new_batch_norm_backward_cpu
CUDA: _new_batch_norm_backward_cuda
MPS: _new_batch_norm_backward_mps
MkldnnCPU: _new_batch_norm_backward_mkldnn
# TODO: reduce signatures down to one when optional args are available
- func: _sparse_sum(Tensor self) -> Tensor
- func: _sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor
- func: _sparse_sum.dim(Tensor self, int[1] dim) -> Tensor
dispatch:
CompositeExplicitAutograd: _sparse_sum
autogen: _sparse_sum.dim_out
- func: _sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor
- func: _sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor
dispatch:
SparseCPU: _sparse_sum_backward_cpu
SparseCUDA: _sparse_sum_backward_cuda
autogen: _sparse_sum_backward.out
- func: _sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
dispatch:
SparseCsrCPU: _sparse_csr_sum_cpu
SparseCsrCUDA: _sparse_csr_sum_cuda
autogen: _sparse_csr_sum.dim_dtype_out
- func: _sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
dispatch:
SparseCsrCPU: _sparse_csr_prod_cpu
SparseCsrCUDA: _sparse_csr_prod_cuda
autogen: _sparse_csr_prod.dim_dtype_out
- func: _sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
python_module: sparse
variants: function
- func: _sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
python_module: sparse
variants: function
- func: _sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
python_module: sparse
dispatch:
SparseCPU: softmax_sparse_cpu
SparseCUDA: softmax_sparse_cuda
autogen: _sparse_softmax.out
- func: _sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
dispatch:
SparseCPU: softmax_backward_sparse_cpu
SparseCUDA: softmax_backward_sparse_cuda
autogen: _sparse_softmax_backward_data.out
- func: _sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
python_module: sparse
variants: function
- func: _sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
python_module: sparse
variants: function
- func: _sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
python_module: sparse
dispatch:
SparseCPU: log_softmax_sparse_cpu
SparseCUDA: log_softmax_sparse_cuda
autogen: _sparse_log_softmax.out
- func: _sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
dispatch:
SparseCPU: log_softmax_backward_sparse_cpu
SparseCUDA: log_softmax_backward_sparse_cuda
autogen: _sparse_log_softmax_backward_data.out
- func: _spdiags(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None) -> Tensor
python_module: sparse
dispatch:
CPU: spdiags
autogen: _spdiags.out
- func: norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: norm
autogen: norm.ScalarOpt_dtype_out
- func: norm.Scalar(Tensor self, Scalar p=2) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: norm
autogen: norm.Scalar_out
- func: norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
structured_delegate: norm.dtype_out
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
SparseCPU, SparseCUDA: sparse_dtype_norm
- func: norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor
structured_delegate: norm.out
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
SparseCPU, SparseCUDA: sparse_norm
- func: norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
structured: True
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: norm_dtype_out
MPS: norm_dtype_out_mps
- func: norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
structured: True
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: norm_out
MPS: norm_out_mps
# These four redispatch in their implementation, so OK to be CompositeImplicitAutograd
- func: norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
- func: norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
- func: norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
- func: norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
- func: frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)
variants: method, function
dispatch:
CompositeExplicitAutograd: frexp
tags: pointwise
- func: frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)
dispatch:
CPU, CUDA: frexp_out
tags: pointwise
# Deprecated (v.1.12)
- func: frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
variants: function
# Deprecated (v.1.12)
- func: frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
variants: function
# Deprecated (v.1.12)
- func: nuclear_norm(Tensor self, bool keepdim=False) -> Tensor
variants: function
# Deprecated (v.1.12)
- func: nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
variants: function
# Deprecated (v.1.12)
- func: nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor
variants: function
# Deprecated (v.1.12)
- func: nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
variants: function
- func: clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: clone
SparseCPU, SparseCUDA: clone_sparse
SparseCsrCPU, SparseCsrCUDA: clone_sparse_compressed
MkldnnCPU: mkldnn_clone
QuantizedCPU, QuantizedCUDA: quantized_clone
NestedTensorCPU, NestedTensorCUDA: clone_nested
autogen: clone.out
tags: [core, pointwise]
- func: positive(Tensor(a) self) -> Tensor(a)
variants: function, method
tags: pointwise
- func: resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
variants: function, method
dispatch:
CompositeExplicitAutograd: resize_as_
autogen: resize_as, resize_as.out
tags: inplace_view
- func: resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
variants: function, method
dispatch:
SparseCPU, SparseCUDA: resize_as_sparse_
SparseCsrCPU, SparseCsrCUDA: resize_as_sparse_compressed_
autogen: resize_as_sparse, resize_as_sparse.out
- func: zero_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CPU, CUDA: zero_
MPS: zero_mps_
Meta: zero_meta_
SparseCPU, SparseCUDA, SparseMeta: zero_sparse_
SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: zero_sparse_csr_
MkldnnCPU: mkldnn_zero_
NestedTensorCPU, NestedTensorCUDA: zero_nested_
autogen: zero, zero.out
- func: sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: sub_out
MPS: sub_out_mps
SparseCPU, SparseCUDA: sub_out_sparse
tags: pointwise
- func: sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: sub.out
dispatch:
SparseCPU, SparseCUDA: sub_sparse
ZeroTensor: sub_zerotensor
NestedTensorCPU, NestedTensorCUDA: NestedTensor_sub_Tensor
tags: [core, pointwise]
- func: sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: sub.out
dispatch:
SparseCPU, SparseCUDA: sub_sparse_
tags: pointwise
# For C++ only, until we have conversion from C++ numbers to Tensor
- func: sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: sub
tags: [core, pointwise]
- func: sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: sub_
autogen: sub.Scalar_out
tags: pointwise
# subtract, alias for sub
- func: subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
- func: subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
variants: function, method
- func: subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
variants: method
# For C++ only, until we have conversion from C++ numbers to Tensor
- func: subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
variants: function, method
- func: subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
variants: method
- func: rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CPU, CUDA: rsub
autogen: rsub.Tensor_out
- func: heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: heaviside_out
tags: pointwise
- func: heaviside(Tensor self, Tensor values) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: heaviside.out
tags: pointwise
- func: heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: heaviside.out
# For C++ only, until we have conversion from C++ numbers to Tensor
- func: rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: rsub
autogen: rsub.Scalar_out
tags: pointwise
# Functionally the same as addmm, but we give it a different derivative formula
# that doesn't propagate gradients to non-present entries on sparse.
- func: _sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
python_module: sparse
dispatch:
CompositeExplicitAutograd: _sparse_addmm
autogen: _sparse_addmm.out
- func: sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
python_module: sparse
dispatch:
SparseCsrCUDA: sparse_sampled_addmm_out_sparse_csr_cuda
SparseCsrCPU: sparse_sampled_addmm_out_sparse_csr_cpu
- func: sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
python_module: sparse
dispatch:
SparseCsrCUDA: sparse_sampled_addmm_sparse_csr_cuda
SparseCsrCPU: sparse_sampled_addmm_sparse_csr_cpu
- func: _sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor)
python_module: sparse
dispatch:
SparseCsrCPU: _sparse_mm_reduce_impl_sparse_csr_cpu
- func: _sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor)
python_module: sparse
dispatch:
SparseCsrCPU: _sparse_mm_reduce_impl_backward_sparse_csr_cpu
- func: addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU: addmm_out_cpu
CUDA: addmm_out_cuda
MPS: addmm_out_mps
SparseCPU: addmm_out_sparse_dense_cpu
SparseCUDA: addmm_out_sparse_dense_cuda
SparseCsrCPU: addmm_out_sparse_compressed_cpu
SparseCsrCUDA: addmm_out_sparse_compressed_cuda
- func: addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
structured_delegate: addmm.out
variants: function, method
dispatch:
SparseCPU: addmm_sparse_dense_cpu
SparseCUDA: addmm_sparse_dense_cuda
SparseCsrCPU, SparseCsrCUDA: addmm_sparse_compressed_dense
tags: core
- func: addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
structured_delegate: addmm.out
variants: method
dispatch:
# Warning! For whatever reason, the inplace sparse addmm is NON
# broadcasting
SparseCPU: s_addmm_sparse_dense_cpu_
SparseCUDA: s_addmm_sparse_dense_cuda_
- func: _addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU: addmm_activation_out_cpu
CUDA: addmm_activation_out_cuda
- func: _addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor
structured_delegate: _addmm_activation.out
variants: function, method
- func: _scaled_mm(Tensor self, Tensor mat2, *, Tensor? bias=None, ScalarType? out_dtype=None, Tensor? scale_a=None, Tensor? scale_b=None, Tensor? scale_result=None, bool use_fast_accum=False) -> (Tensor, Tensor)
variants: function
dispatch:
CUDA: _scaled_mm_cuda
- func: _scaled_mm.out(Tensor self, Tensor mat2, *, Tensor? bias=None, ScalarType? out_dtype=None, Tensor? scale_a=None, Tensor? scale_b=None, Tensor? scale_result=None, bool use_fast_accum=False, Tensor(a!) out, Tensor(b!) out_amax) -> (Tensor(a!), Tensor(b!))
variants: function
dispatch:
CUDA: _scaled_mm_out_cuda
# NOTE [ Sparse: autograd and API ]
#
#
# Sparse Tensor Constructors
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The API entry points to sparse tensor construction should be
# `sparse_coo_tensor` and `_sparse_coo_tensor_unsafe`. Depending on whether the
# indices and values tensors are given, they eventually dispatch to either
# `sparse_coo_tensor_with_dims` or `sparse_coo_tensor_with_dims_and_tensors`.
#
# The autograd support for the ctor is implemented on `sparse_coo_tensor_with_dims_and_tensors`.
#
# The API methods `sparse_coo_tensor` and `_sparse_coo_tensor_unsafe`
# **must not** have specific type dispatches because otherwise codegen will
# consider them as abstract methods (see Note [Abstract ATen methods]), dispatch
# using **Tensor** type, and thus lose autograd tracking on the actual method
# they dispatch to, e.g., `sparse_coo_tensor_with_dims_and_tensors`.
#
#
# Sparse Methods API Design
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Goals: 1. Flexible API for users to write custom sparse ops
# 2. ctor and member accessor with autograd support
#
# To achieve 1, we need to provide a set of *dangerous* APIs (dangerous in the
# sense that misusing them will break the sparse tensor invariants and may result
# in unexpected behavior, e.g., a crash). These methods are all prefixed with
# underscore "_" to indicate that they should be used with care. We provide:
#
# + `_indices()`: returns the *raw* indices within the sparse tensor (not just
# sharing storage). Any inplace operation will change the
# actual indices, including t_, set_, as_strided_, resize_,
# etc.
# + `_values()`: returns the *raw* values within the sparse tensor. Similar
# semantics to `_indices()`.
# + `_nnz()`: returns the number of non-zero entries. This will always be
# determined by the shapes of indices and values.
# + `_coalesced_(bool)`: inplace sets whether the tensor is coalesced, and
# returns itself.
#
# These methods are very useful in writing new operations, e.g., a custom
# autograd Function.
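#
# For example, a minimal sketch of calling these raw accessors from Python (the
# concrete tensors are arbitrary):
#
#   i = torch.tensor([[0, 0], [1, 1]])         # two entries at the same position
#   v = torch.tensor([1., 2.])
#   s = torch.sparse_coo_tensor(i, v, (2, 2))  # uncoalesced
#   s._nnz()       # 2: determined by the shapes of indices and values, duplicates included
#   s._indices()   # raw, possibly uncoalesced indices
#   s._values()    # raw, possibly uncoalesced values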
#
# We also provide other public *safe* APIs:
# + `indices()`: returns a **view** of the indices tensor if the sparse tensor
# is **coalesced**.
# + `values()`: returns a **view** of the values tensor if the containing
# sparse tensor is **coalesced**.
# + `sparse_dim()`: number of sparse dimensions
# + `dense_dim()`: number of dense dimensions
# + `is_coalesced()`: whether the sparse tensor is coalesced
#
# `_indices()` and `_values()` return the raw indices and values dense
# tensors within a sparse tensor. They can be quite unsafe with inplace
# operations like `t_()`, and they expose uncoalesced indices and values. The public
# recommended API is `indices()` and `values()`, both of which first check that
# the tensor is coalesced and return views on those tensors.
#
#
# Autograd Support
# ~~~~~~~~~~~~~~~~
#
# Autograd is supported on `values()` and sparse tensor ctor with indices and
# values tensors. E.g., `torch.sparse_coo_tensor(i, v).values().sum()` is
# differentiable w.r.t. `v`.
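#
# For example, a concrete sketch of that case (coalescing first, since `values()`
# requires a coalesced tensor as noted above; the tensors are arbitrary):
#
#   i = torch.tensor([[0, 1], [2, 0]])
#   v = torch.tensor([3., 4.], requires_grad=True)
#   s = torch.sparse_coo_tensor(i, v, (2, 3))
#   s.coalesce().values().sum().backward()   # gradients flow back into `v.grad`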
#
# NB: The `values()` and `_values()` operators are special in that they are
# layout-aware, i.e., the output depends not just on the data it represents, but
# also on the input layout details (in this case, the `indices` tensor). See
# NOTE [ as_strided Backward and layout-aware/agnostic autograd ] in Functions.cpp
# for discussion on layout-aware vs layout-agnostic autograd. Since PyTorch ops
# operate in the layout-agnostic mode, similar to `as_strided`, the backward of
# these two operators needs to consider them in a layout-agnostic way:
# + `values()`:
# Input is coalesced.
# We just pretend having `input.indices()` as an additional argument
# `input_indices`, then forward is similar to
# `input.to(kStrided).index_select(input_indices)` regardless of the layout.
# Note that `values()` is normally layout-aware even if we constrain
# ourselves to sparse inputs, since it may include all-zero value entries
# as "present" entries.
# + `_values()`:
# Input may be uncoalesced.
# It is not straightforward to construct a layout-agnostic version because
# duplicate indices entries may exist and additional parameterization is
# needed to distribute the value into different values entries. Furthermore,
# this op is intended to provide ways to write custom sparse ops, rather
# than being used in the autograd graph, so it is marked as *non-differentiable*
# in derivatives.yaml.
#
# Before reading the following, see NOTE [ Autograd Variable Views ] in
# variable.h for details on views that are tracked by autograd, and views that
# are not.
#
# Moreover, these methods return tensors that share storage with inputs, so we
# mark these methods as view ops to support autograd history tracking.
# The sparse tensor ctor output should technically be a view of both the input
# indices and values tensors, but currently we only support setting it as a view
# of a single Variable, so it is only a view of the values tensor.
# TODO: clone indices in sparse tensor ctor.
#
# For other methods that return outputs that share storage with inputs, i.e.,
# `indices()` and `_indices()`, we mark their outputs as non-differentiable, so
# the view relation is not tracked by autograd, but the version counter is still
# shared. In other words, their outputs are non-differentiable views of the
# sparse tensor.
# FIXME: it would be nicer if TensorOptions were optional-based; we are not adding
# default arguments for the options, given that the default would never make sense.
- func: sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
dispatch:
CompositeExplicitAutograd: sparse_compressed_tensor
- func: sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- func: sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- func: sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- func: sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- func: sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
dispatch:
CompositeExplicitAutograd: sparse_compressed_tensor
- func: sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- func: sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- func: sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- func: sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
- func: _sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeImplicitAutograd: _sparse_compressed_tensor_unsafe_symint
- func: _sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- func: _sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- func: _sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- func: _sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- func: sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
dispatch:
CompositeExplicitAutograd: sparse_coo_tensor
autogen: sparse_coo_tensor.size_out
- func: sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor
- func: sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor
- func: _sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor
dispatch:
CompositeImplicitAutograd: _sparse_coo_tensor_unsafe_symint
- func: _validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size, bool? is_coalesced=None) -> ()
- func: _validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> ()
- func: _validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
- func: _validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
- func: _validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
- func: _validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
- func: _sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
dispatch:
SparseCPU, SparseCUDA, SparseMeta, Meta: new_with_dims_sparse
autogen: _sparse_coo_tensor_with_dims.out
- func: _sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? is_coalesced=None) -> Tensor
dispatch:
SparseCPU, SparseCUDA, SparseMeta, Meta: new_with_dims_and_tensor_sparse_symint
autogen: _sparse_coo_tensor_with_dims_and_tensors.out
- func: sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
variants: method
dispatch:
SparseCPU, SparseCUDA, SparseMeta: sparse_resize_
autogen: sparse_resize, sparse_resize.out
- func: sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
variants: method
dispatch:
SparseCPU, SparseCUDA, SparseMeta: sparse_resize_and_clear_
autogen: sparse_resize_and_clear, sparse_resize_and_clear.out
- func: sparse_mask(Tensor self, Tensor mask) -> Tensor
variants: method
dispatch:
SparseCPU, SparseCUDA: sparse_mask
SparseCsrCPU, SparseCsrCUDA: sparse_mask_sparse_compressed
autogen: sparse_mask.out
- func: _sparse_mask_projection(Tensor self, Tensor mask, bool accumulate_matches=False) -> Tensor
variants: method
dispatch:
SparseCPU, SparseCUDA: sparse_mask_projection
autogen: _sparse_mask_projection.out
- func: _to_cpu(Tensor[] tensors) -> Tensor[]
variants: function
- func: to_dense(Tensor self, ScalarType? dtype=None, *, bool? masked_grad=None) -> Tensor
variants: method
# Special case of to_dense with custom derivative
- func: _to_dense(Tensor self, ScalarType? dtype=None, bool? masked_grad=None) -> Tensor
variants: method
dispatch:
SparseCPU, SparseCUDA: sparse_to_dense
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_dense
MkldnnCPU: mkldnn_to_dense
autogen: _to_dense.out
- func: to_dense_backward(Tensor grad, Tensor input, bool? masked_grad=None) -> Tensor
- func: sparse_dim(Tensor self) -> int
variants: method
dispatch:
CPU, CUDA: sparse_dim_strided
SparseCPU, SparseCUDA, SparseMeta: sparse_dim_sparse
SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: sparse_dim_sparse_csr
device_check: NoCheck
device_guard: False
# legacy method
- func: _dimI(Tensor self) -> int
variants: method
dispatch:
SparseCPU, SparseCUDA: sparse_dim_sparse
device_check: NoCheck
device_guard: False
- func: dense_dim(Tensor self) -> int
variants: method
dispatch:
CPU, CUDA: dense_dim_strided
SparseCPU, SparseCUDA, SparseMeta: dense_dim_sparse
SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: dense_dim_sparse_csr
device_check: NoCheck
device_guard: False
# legacy method
- func: _dimV(Tensor self) -> int
variants: method
dispatch:
SparseCPU, SparseCUDA, SparseMeta: dense_dim_sparse
device_check: NoCheck
device_guard: False
- func: _nnz(Tensor self) -> int
variants: method
dispatch:
SparseCPU, SparseCUDA, SparseMeta: _nnz_sparse
SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: _nnz_sparse_csr
device_check: NoCheck
device_guard: False
# NOTE: [ coalesce autograd ]
# coalesce returns self directly for already coalesced sparse tensors.
# This means coalesce cannot have a derivative registered, otherwise it creates
# circular references in the autograd graph (see gh-52874).
# Instead, the derivative is registered on the slow-path "_coalesce"
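# For example (a minimal sketch; the tensors are arbitrary):
#
#   i = torch.tensor([[0, 0, 1], [1, 1, 2]])
#   v = torch.tensor([1., 2., 3.])
#   s = torch.sparse_coo_tensor(i, v, (2, 3))   # uncoalesced: duplicate index (0, 1)
#   c = s.coalesce()                            # goes through the slow path `_coalesce`
#   assert c.is_coalesced()
#   c.coalesce()                                # already coalesced: returns self directly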
- func: coalesce(Tensor(a) self) -> Tensor(a)
variants: method
- func: _coalesce(Tensor self) -> Tensor
dispatch:
SparseCPU: _coalesce_sparse_cpu
SparseCUDA: _coalesce_sparse_cuda
autogen: _coalesce.out
- func: is_coalesced(Tensor self) -> bool
variants: method
dispatch:
SparseCPU, SparseCUDA, SparseMeta: is_coalesced_sparse
CompositeExplicitAutograd: is_coalesced_default
device_check: NoCheck
device_guard: False
- func: _indices(Tensor(a) self) -> Tensor(a)
variants: method
dispatch:
SparseCPU, SparseCUDA, SparseMeta: _indices_sparse
device_check: NoCheck
device_guard: False
- func: _values(Tensor(a) self) -> Tensor(a)
variants: method
dispatch:
SparseCPU, SparseCUDA, SparseMeta: _values_sparse
device_check: NoCheck
device_guard: False
# This method doesn't do any checks; it only directly sets the flag, so it can be
# a bit unsafe. Similar to `_indices` and `_values`, it is useful for implementing
# custom sparse operations in Python/C++ extensions.
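# For example (a minimal sketch; `new_i` and `new_v` stand for hypothetical indices
# and values that the caller has already sorted and de-duplicated; this op does not
# verify that):
#
#   out = torch.sparse_coo_tensor(new_i, new_v, size)
#   out._coalesced_(True)   # caller asserts that the coalesced invariant holds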
- func: _coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!)
variants: method
dispatch:
SparseCPU, SparseCUDA, SparseMeta: _coalesced_sparse_
device_check: NoCheck
device_guard: False
autogen: _coalesced, _coalesced.out
- func: indices(Tensor(a) self) -> Tensor(a)
variants: method
dispatch:
SparseCPU, SparseCUDA, SparseMeta: indices_sparse
CompositeExplicitAutograd: indices_default
device_check: NoCheck
device_guard: False
- func: values(Tensor(a) self) -> Tensor(a)
variants: method
dispatch:
SparseCPU, SparseCUDA, SparseMeta: values_sparse
SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: values_sparse_csr
NestedTensorCPU, NestedTensorCUDA: values_nested
CompositeExplicitAutograd: values_default
device_check: NoCheck
device_guard: False
- func: crow_indices(Tensor(a) self) -> Tensor(a)
variants: method
dispatch:
SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: crow_indices_sparse_csr
CompositeExplicitAutograd: crow_indices_default
device_check: NoCheck
device_guard: False
- func: col_indices(Tensor(a) self) -> Tensor(a)
variants: method
dispatch:
SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: col_indices_sparse_csr
CompositeExplicitAutograd: col_indices_default
device_check: NoCheck
device_guard: False
- func: ccol_indices(Tensor(a) self) -> Tensor(a)
variants: method
dispatch:
SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: ccol_indices_sparse_csr
CompositeExplicitAutograd: ccol_indices_default
device_check: NoCheck
device_guard: False
- func: row_indices(Tensor(a) self) -> Tensor(a)
variants: method
dispatch:
SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: row_indices_sparse_csr
CompositeExplicitAutograd: row_indices_default
device_check: NoCheck
device_guard: False
- func: hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
SparseCPU: hspmm_out_sparse_cpu
SparseCUDA: hspmm_out_sparse_cuda
- func: hspmm(Tensor mat1, Tensor mat2) -> Tensor
dispatch:
SparseCPU: hspmm_sparse_cpu
SparseCUDA: hspmm_sparse_cuda
- func: copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
device_check: NoCheck # Allows copy into different device
variants: function
dispatch:
SparseCPU, SparseCUDA: copy_sparse_
autogen: copy_sparse_to_sparse, copy_sparse_to_sparse.out
# Adding AutogradNestedTensor makes this function CompositeImplicit-like for nested tensors
- func: unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]
variants: function, method
dispatch:
CompositeExplicitAutograd: unbind
NestedTensorCPU, NestedTensorCUDA: NestedTensor_unbind
- func: unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]
variants: function, method
- func: to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
variants: method
# Special case of to_sparse.sparse_dim with custom derivative
- func: _to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
variants: method
dispatch:
CPU, CUDA: dense_to_sparse
SparseCPU, SparseCUDA: sparse_coo_to_sparse
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse
autogen: _to_sparse.sparse_dim_out
- func: to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor
variants: method
# Special case of to_sparse with custom derivative
- func: _to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor
variants: method
dispatch:
CPU, CUDA: dense_to_sparse
SparseCPU, SparseCUDA: sparse_coo_to_sparse
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse
autogen: _to_sparse.out
- func: to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor
variants: method
# Special case of to_sparse_csr with custom derivative
- func: _to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor
variants: method
dispatch:
CPU, CUDA: dense_to_sparse_csr
SparseCPU, SparseCUDA: coo_to_sparse_csr
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_csr
autogen: _to_sparse_csr.out
- func: to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor
variants: method
# Special case of to_sparse_csc with custom derivative
- func: _to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor
variants: method
dispatch:
CPU, CUDA: dense_to_sparse_csc
SparseCPU, SparseCUDA: coo_to_sparse_csc
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_csc
autogen: _to_sparse_csc.out
- func: to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
variants: method
# Special case of to_sparse_bsr with custom derivative
- func: _to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
variants: method
dispatch:
CPU, CUDA: dense_to_sparse_bsr
SparseCPU, SparseCUDA: coo_to_sparse_bsr
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_bsr
autogen: _to_sparse_bsr.out
- func: to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
variants: method
# Special case of to_sparse_bsc with custom derivative
- func: _to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
variants: method
dispatch:
CPU, CUDA: dense_to_sparse_bsc
SparseCPU, SparseCUDA: coo_to_sparse_bsc
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse_bsc
autogen: _to_sparse_bsc.out
- func: _to_sparse_semi_structured(Tensor dense) -> (Tensor, Tensor)
variants: function
dispatch:
CUDA: _to_sparse_semi_structured
- func: to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor
variants: method
dispatch:
CPU: dense_to_mkldnn
autogen: to_mkldnn.out
- func: mkldnn_reorder_conv2d_weight(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None) -> Tensor
variants: function
python_module: nn
dispatch:
MkldnnCPU: mkldnn_reorder_conv2d_weight
autogen: mkldnn_reorder_conv2d_weight.out
- func: mkldnn_reorder_conv3d_weight(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1) -> Tensor
variants: function
python_module: nn
dispatch:
MkldnnCPU: mkldnn_reorder_conv3d_weight
autogen: mkldnn_reorder_conv3d_weight.out
- func: to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor
- func: quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor
variants: function
dispatch:
CPU, CUDA: quantize_per_tensor_dynamic
autogen: quantize_per_tensor_dynamic.out
- func: quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor
variants: function
dispatch:
CPU, CUDA: quantize_per_tensor
autogen: quantize_per_tensor.out
- func: quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor
variants: function
dispatch:
CPU, CUDA: quantize_per_tensor_tensor_qparams
autogen: quantize_per_tensor.tensor_qparams_out
- func: quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]
variants: function
dispatch:
CPU: quantize_per_tensor_list_cpu
autogen: quantize_per_tensor.tensors_out
- func: quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor
variants: function
dispatch:
CPU, CUDA: quantize_per_channel
autogen: quantize_per_channel.out
- func: dequantize.self(Tensor self) -> Tensor
variants: function, method
dispatch:
CPU, CUDA: dequantize_cpu_or_cuda
QuantizedCPU, QuantizedCUDA: dequantize_quantized
autogen: dequantize.self_out
- func: dequantize.tensors(Tensor[] tensors) -> Tensor[]
variants: function
dispatch:
QuantizedCPU: dequantize_tensors_quantized_cpu
autogen: dequantize.tensors_out
- func: q_scale(Tensor self) -> float
variants: function, method
dispatch:
QuantizedCPU, QuantizedCUDA: q_scale_quant
- func: q_zero_point(Tensor self) -> int
variants: function, method
dispatch:
QuantizedCPU, QuantizedCUDA: q_zero_point_quant
- func: q_per_channel_scales(Tensor self) -> Tensor
variants: function, method
dispatch:
QuantizedCPU, QuantizedCUDA: q_per_channel_scales
autogen: q_per_channel_scales.out
- func: q_per_channel_zero_points(Tensor self) -> Tensor
variants: function, method
dispatch:
QuantizedCPU, QuantizedCUDA: q_per_channel_zero_points
autogen: q_per_channel_zero_points.out
- func: q_per_channel_axis(Tensor self) -> int
variants: function, method
dispatch:
QuantizedCPU, QuantizedCUDA: q_per_channel_axis
- func: int_repr(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
QuantizedCPU: int_repr_quantized_cpu
QuantizedCUDA: int_repr_quantized_cuda
autogen: int_repr.out
- func: _make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor
dispatch:
CPU: make_per_tensor_quantized_tensor_cpu
CUDA: make_per_tensor_quantized_tensor_cuda
autogen: _make_per_tensor_quantized_tensor.out
- func: _make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor
dispatch:
CPU: make_per_channel_quantized_tensor_cpu
CUDA: make_per_channel_quantized_tensor_cuda
autogen: _make_per_channel_quantized_tensor.out
- func: qscheme(Tensor self) -> QScheme
variants: method
dispatch:
QuantizedCPU, QuantizedCUDA: qscheme_quant
- func: fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
- func: fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
- func: fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
variants: function
dispatch:
CPU, CUDA: fake_quantize_per_tensor_affine_cachemask
autogen: fake_quantize_per_tensor_affine_cachemask.out
- func: _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
variants: function
dispatch:
CPU, CUDA: _fake_quantize_per_tensor_affine_cachemask_tensor_qparams
autogen: _fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out
- func: fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
variants: function
- func: _fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
variants: function
dispatch:
CPU, CUDA: _fake_quantize_learnable_per_tensor_affine
autogen: _fake_quantize_learnable_per_tensor_affine.out
- func: _fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
variants: function
dispatch:
CPU, CUDA: _fake_quantize_learnable_per_tensor_affine_backward
- func: fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
- func: fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
variants: function
dispatch:
CPU, CUDA: fake_quantize_per_channel_affine_cachemask
autogen: fake_quantize_per_channel_affine_cachemask.out
- func: fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
variants: function
- func: _fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
variants: function
dispatch:
CPU, CUDA: _fake_quantize_learnable_per_channel_affine
autogen: _fake_quantize_learnable_per_channel_affine.out
- func: _fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
variants: function
dispatch:
CPU, CUDA: _fake_quantize_learnable_per_channel_affine_backward
- func: fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor
variants: function
- func: _fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask)
dispatch:
CPU: fused_moving_avg_obs_fake_quant_cpu
CUDA: fused_moving_avg_obs_fake_quant_cuda
autogen: _fused_moving_avg_obs_fq_helper_functional, _fused_moving_avg_obs_fq_helper.out
- func: _choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int)
variants: function
- func: _saturate_weight_to_fp16(Tensor weight) -> Tensor
variants: function
- func: choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor)
variants: function
- func: _autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)
variants: method
device_guard: False
- func: _autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a)
variants: method
device_guard: False
- func: _to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: _to_copy
NestedTensorCPU, NestedTensorCUDA: _to_copy_nested
autogen: _to_copy.out
tags: core
# to(Device) must not exist because all constructors of Device also work for
# TensorOptions. Otherwise, an ambiguity error is thrown.
# See NOTE [ TensorOptions Constructors ].
- func: to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
variants: method
device_check: NoCheck
device_guard: False
- func: to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
variants: method
device_check: NoCheck
device_guard: False
- func: to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
variants: method
device_check: NoCheck
device_guard: False
- func: to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
variants: method
device_check: NoCheck
device_guard: False
- func: meshgrid(Tensor[] tensors) -> Tensor[]
# TODO: Two weeks after this lands, combine these two overloads,
# making "indexing" optional. These are temporarily distinct for
# forward-compatibility reasons.
- func: meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]
- func: cartesian_prod(Tensor[] tensors) -> Tensor
variants: function
- func: combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor
variants: function
- func: item(Tensor self) -> Scalar
tags: data_dependent_output
variants: method
- func: result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType
variants: function
- func: result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType
variants: function
- func: result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType
variants: function
- func: result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType
- func: can_cast(ScalarType from, ScalarType to) -> bool
variants: function
- func: promote_types(ScalarType type1, ScalarType type2) -> ScalarType
variants: function
# NB: Does NOT check precondition that numel == 1
- func: _local_scalar_dense(Tensor self) -> Scalar
tags: [core, data_dependent_output]
dispatch:
CPU: _local_scalar_dense_cpu
CUDA: _local_scalar_dense_cuda
MPS: _local_scalar_dense_mps
variants: function
# MPS LSTM implementation
- func: _lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)
dispatch:
MPS: _lstm_mps
autogen: _lstm_mps.out
tags: nondeterministic_seeded
- func: lstm_mps_backward(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[])
dispatch:
MPS: lstm_mps_backward
autogen: lstm_mps_backward.out
# Fused RNN kernels
- func: _thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)
dispatch:
CUDA: _thnn_fused_lstm_cell_cuda
autogen: _thnn_fused_lstm_cell.out
# NB: The composite version of this function below is a simple wrapper that duplicates some of the outputs
# It is necessary to avoid triggering TensorImpl use count checks in debug mode
# NB: this function is NOT differentiable
- func: _thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor)
dispatch:
CUDA: _thnn_fused_lstm_cell_backward_impl_cuda
autogen: _thnn_fused_lstm_cell_backward_impl.out
- func: _thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
- func: _thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
- func: _thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)
dispatch:
CUDA: _thnn_fused_gru_cell_cuda
autogen: _thnn_fused_gru_cell.out
- func: _thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
dispatch:
CUDA: _thnn_fused_gru_cell_backward_cuda
autogen: _thnn_fused_gru_cell_backward.out
- func: _thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
# RNN cells and layers
- func: lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)
tags: nondeterministic_seeded
- func: lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)
tags: nondeterministic_seeded
- func: gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
tags: nondeterministic_seeded
- func: gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
tags: nondeterministic_seeded
- func: rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
tags: nondeterministic_seeded
- func: rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
tags: nondeterministic_seeded
- func: rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
tags: nondeterministic_seeded
- func: rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
tags: nondeterministic_seeded
- func: lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)
- func: gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
- func: rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
- func: rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
# Quantized RNN layer registration has been moved to C10 dispatch in `RNN.cpp`
# Quantized RNN layers
# - func: quantized_lstm(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)
# - func: quantized_lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)
# Quantized GRU layers
# - func: quantized_gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
#
# - func: quantized_gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
#
# Quantized RNN cells
- func: quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)
- func: quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
- func: quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
- func: quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
# PackedSequence utilities
- func: _pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)
dispatch:
CompositeExplicitAutograd: _pack_padded_sequence
autogen: _pack_padded_sequence.out
- func: _pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor
dispatch:
CompositeImplicitAutograd: _pack_padded_sequence_backward_symint
- func: _pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)
# wrappers for legacy TH methods
- func: set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)
variants: method
device_check: NoCheck
device_guard: False
dispatch:
CPU, CUDA, Meta, MPS: set_
autogen: set.source_Storage, set.source_Storage_out
tags: inplace_view
- func: set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
variants: method
device_check: NoCheck
device_guard: False
dispatch:
CPU: set_storage_cpu_
Meta: set_storage_meta__symint
CUDA: set_storage_cuda_
MPS: set_storage_mps_
QuantizedCPU, QuantizedCUDA: set_storage_quantized_
autogen: set.source_Storage_storage_offset, set.source_Storage_storage_offset_out
tags: inplace_view
- func: set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
variants: method
device_check: NoCheck
device_guard: False
dispatch:
CompositeImplicitAutograd: set__symint
tags: inplace_view
- func: set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)
variants: method
device_check: NoCheck
device_guard: False
dispatch:
CPU, CUDA, Meta, MPS: set_tensor_
autogen: set.source_Tensor, set.source_Tensor_out
tags: inplace_view
- func: set_(Tensor(a!) self) -> Tensor(a!)
variants: method
dispatch:
CPU: set_cpu_
CUDA: set_cuda_
Meta: set_meta_
MPS: set_mps_
autogen: set, set.out
tags: inplace_view
# Not making it CompositeImplicitAutograd because lift
# should be a primitive w.r.t. functorch
# TODO: this should have a view annotation
# TODO: shouldn't be a method
- func: lift(Tensor self) -> Tensor
dispatch:
CompositeExplicitAutograd: lift
autogen: lift.out
# lift_fresh is called with an argument that is guaranteed to be
# fresh (i.e., newly allocated). This is ONLY called from a
# torch.tensor call; if you FX trace a lift_fresh, you are obligated
# to convert this into a lift_fresh_copy (because FX will violate the
# freshness invariant when tracing).
- func: lift_fresh(Tensor(a) self) -> Tensor(a)
dispatch:
CompositeExplicitAutograd: lift_fresh
# Like lift, but it clones the input.
- func: lift_fresh_copy(Tensor self) -> Tensor
tags: view_copy
dispatch:
CompositeExplicitAutogradNonFunctional: lift_fresh_copy
autogen: lift_fresh_copy.out
- func: is_set_to(Tensor self, Tensor tensor) -> bool
variants: method
device_check: NoCheck
device_guard: False
dispatch:
CPU, CUDA, MPS: is_set_to
- func: masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CPU: masked_fill__cpu
CUDA: masked_fill__cuda
QuantizedCPU: masked_fill__quantized_cpu
QuantizedCUDA: masked_fill__quantized_cuda
MPS: masked_fill__mps
autogen: masked_fill.Scalar_out
- func: masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: masked_fill
NestedTensorCPU, NestedTensorCUDA: NestedTensor_masked_fill
tags: pointwise
- func: masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CPU: masked_fill__cpu
CUDA: masked_fill__cuda
QuantizedCPU: masked_fill__quantized_cpu
QuantizedCUDA: masked_fill__quantized_cuda
MPS: masked_fill__mps
autogen: masked_fill.Tensor_out
- func: masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: masked_fill
- func: masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)
variants: method
dispatch:
CPU: masked_scatter__cpu
CUDA: masked_scatter__cuda
MPS: masked_scatter__mps
autogen: masked_scatter.out
- func: masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: masked_scatter
- func: masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor
dispatch:
CompositeExplicitAutograd: masked_scatter_backward_symint
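# Illustrative usage (hedged example; values are hypothetical):
#
#   x = torch.zeros(2, 2)
#   mask = torch.tensor([[True, False], [False, True]])
#   x.masked_fill_(mask, 3.0)                          # -> [[3., 0.], [0., 3.]]
#   x.masked_scatter_(mask, torch.tensor([7., 8.]))    # -> [[7., 0.], [0., 8.]]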
- func: _masked_softmax(Tensor self, Tensor mask, int? dim=None, int? mask_type=None) -> Tensor
dispatch:
CUDA: masked_softmax_cuda
CPU: masked_softmax_cpu
autogen: _masked_softmax.out
- func: _masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? dim=None) -> Tensor
dispatch:
CUDA: masked_softmax_backward_cuda
CPU: masked_softmax_backward_cpu
autogen: _masked_softmax_backward.out
- func: view(Tensor(a) self, SymInt[] size) -> Tensor(a)
variants: method
device_check: NoCheck
device_guard: False
dispatch:
ZeroTensor, Meta, CPU, CUDA, QuantizedCPU, QuantizedCUDA, MPS: view
MkldnnCPU: mkldnn_view
NestedTensorCPU, NestedTensorCUDA: view_nested
tags: core
# Warning: If you want to change the name or overload name of this
# operator, you might also want to change the `isBlockListedSchema`
# function in `torch/csrc/jit/frontend/schema_catching.cpp`.
# The name and overload name of this operator are hardcoded in that
# function in order to work around a bug:
# https://github.com/pytorch/pytorch/issues/47964
- func: view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)
variants: method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: view_dtype
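# Illustrative usage (hedged example): view reinterprets the same storage with
# a new shape, and view.dtype reinterprets the underlying bytes.
#
#   x = torch.arange(6.)
#   x.view(2, 3)                            # same storage, shape (2, 3)
#   torch.tensor([1.0]).view(torch.int32)   # -> tensor([1065353216]), the bit pattern 0x3f800000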
- func: put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)
variants: method
dispatch:
CPU, CUDA: put_
autogen: put.out
- func: put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor
variants: function, method
dispatch:
CompositeExplicitAutograd: put
- func: index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
structured: True
variants: function
precomputed:
- dim -> int dim
dispatch:
CPU: index_add_cpu_out
CUDA: index_add_cuda_out
MPS: index_add_mps_out
- func: index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)
structured_delegate: index_add.out
variants: method
- func: index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
structured_delegate: index_add.out
variants: function, method
- func: index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
variants: function, method
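# Illustrative usage (hedged example; shapes are hypothetical):
#
#   t = torch.zeros(3, 3)
#   index = torch.tensor([0, 2])
#   src = torch.ones(2, 3)
#   t.index_add_(0, index, src, alpha=2)   # rows 0 and 2 each get 2 * src added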
- func: index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
structured: True
variants: function
precomputed:
- dim -> int dim
dispatch:
CPU: index_reduce_cpu_out
CUDA: index_reduce_cuda_out
- func: index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)
structured_delegate: index_reduce.out
variants: method
- func: index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor
structured_delegate: index_reduce.out
variants: function, method
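# Illustrative usage (hedged example): reduce is one of "prod", "mean",
# "amax", "amin"; with the default include_self=True the existing values in
# self participate in the reduction.
#
#   t = torch.ones(3)
#   t.index_reduce_(0, torch.tensor([0, 0, 2]), torch.tensor([2., 5., 4.]), 'amax')
#   # -> tensor([5., 1., 4.])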
- func: index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CPU: index_fill_
CUDA: index_fill_
MPS: index_fill_mps_
autogen: index_fill.int_Scalar_out
- func: index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: index_fill
- func: index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CPU, CUDA: index_fill_
MPS: index_fill_mps_
autogen: index_fill.int_Tensor_out
- func: index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
dispatch:
CompositeExplicitAutograd: index_fill
- func: index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
- func: index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
- func: index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
- func: index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
- func: scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
structured_delegate: scatter.src_out
variants: function, method
tags: core
- func: scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
structured_delegate: scatter.src_out
variants: method
- func: scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
structured: True
variants: function
dispatch:
CPU, CUDA: scatter_src_out
MPS: scatter_src_out_mps
- func: scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
structured_delegate: scatter.value_out
variants: function, method
tags: core
- func: scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
structured_delegate: scatter.value_out
variants: method
- func: scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
structured: True
variants: function
dispatch:
CPU, CUDA: scatter_value_out
MPS: scatter_value_out_mps
- func: scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor
structured_delegate: scatter.reduce_out
variants: function, method
- func: scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)
structured_delegate: scatter.reduce_out
variants: method
- func: scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)
structured: True
variants: function
dispatch:
CPU, CUDA: scatter_reduce_out
MPS: scatter_reduce_out_mps
- func: scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor
structured_delegate: scatter.value_reduce_out
variants: function, method
- func: scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)
structured_delegate: scatter.value_reduce_out
variants: method
- func: scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)
structured: True
variants: function
dispatch:
CPU, CUDA: scatter_value_reduce_out
MPS: scatter_value_reduce_out_mps
- func: scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
variants: function, method
- func: scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
variants: function, method
- func: scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
structured_delegate: scatter_add.out
variants: function, method
tags: core
- func: scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
structured_delegate: scatter_add.out
variants: method
- func: scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
structured: True
variants: function
dispatch:
CPU, CUDA: scatter_add
MPS: scatter_add_mps_out
- func: scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
variants: function, method
- func: scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor
structured_delegate: scatter_reduce.two_out
variants: function, method
tags: core
- func: scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)
structured_delegate: scatter_reduce.two_out
variants: method
- func: scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
structured: True
variants: function
dispatch:
CPU, CUDA: scatter_reduce_two
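# Illustrative usage (hedged example): scatter_add_ accumulates src into self
# at the given indices; scatter_reduce accepts the reduce strings "sum",
# "prod", "mean", "amax", "amin".
#
#   t = torch.zeros(3)
#   idx = torch.tensor([0, 2, 2])
#   src = torch.tensor([1., 2., 3.])
#   t.scatter_add_(0, idx, src)   # -> tensor([1., 0., 5.])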
- func: eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
structured_delegate: eq.Scalar_out
device_check: NoCheck # TensorIterator
variants: method
- func: eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: eq.Tensor_out
device_check: NoCheck # TensorIterator
variants: method
- func: bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
variants: function
dispatch:
CPU, CUDA: bitwise_and_out
MPS: bitwise_and_out_mps
tags: pointwise
- func: bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_and_out
tags: pointwise
- func: bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CompositeExplicitAutograd: bitwise_and
tags: [core, pointwise]
- func: bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_and
autogen: bitwise_and.Scalar_Tensor_out
tags: pointwise
- func: bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
structured_delegate: bitwise_and.Tensor_out
tags: [core, pointwise]
- func: bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: bitwise_and_
tags: pointwise
- func: bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: bitwise_and.Tensor_out
tags: pointwise
- func: __and__.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
- func: __and__.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
- func: __iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
- func: __iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
- func: bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
variants: function
dispatch:
CPU, CUDA: bitwise_or_out
MPS: bitwise_or_out_mps
tags: pointwise
- func: bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_or_out
tags: pointwise
- func: bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CompositeExplicitAutograd: bitwise_or
tags: [core, pointwise]
- func: bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_or
autogen: bitwise_or.Scalar_Tensor_out
tags: pointwise
- func: bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
structured_delegate: bitwise_or.Tensor_out
tags: [core, pointwise]
- func: bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: bitwise_or_
tags: pointwise
- func: bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: bitwise_or.Tensor_out
tags: pointwise
- func: __or__.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
- func: __or__.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
- func: __ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
- func: __ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
- func: bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
variants: function
dispatch:
CPU, CUDA: bitwise_xor_out
MPS: bitwise_xor_out_mps
tags: pointwise
- func: bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_xor_out
tags: pointwise
- func: bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CompositeExplicitAutograd: bitwise_xor
tags: [core, pointwise]
- func: bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_xor
autogen: bitwise_xor.Scalar_Tensor_out
tags: pointwise
- func: bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
structured_delegate: bitwise_xor.Tensor_out
tags: [core, pointwise]
- func: bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: bitwise_xor_
tags: pointwise
- func: bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: bitwise_xor.Tensor_out
tags: pointwise
- func: __xor__.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
tags: pointwise
- func: __xor__.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
tags: pointwise
- func: __ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
tags: pointwise
- func: __ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
tags: pointwise
- func: __lshift__.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CPU, CUDA: __lshift__
tags: pointwise
- func: __lshift__.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CPU, CUDA: __lshift__
tags: pointwise
- func: __ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CPU, CUDA: __ilshift__
autogen: __lshift__.Scalar_out
tags: pointwise
- func: __ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CPU, CUDA: __ilshift__
autogen: __lshift__.Tensor_out
tags: pointwise
- func: bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: bitwise_left_shift.Tensor_out
tags: pointwise
- func: bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: bitwise_left_shift.Tensor_out
tags: pointwise
- func: bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: bitwise_left_shift_out
tags: pointwise
- func: bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CompositeExplicitAutograd: bitwise_left_shift
tags: pointwise
- func: bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: bitwise_left_shift_
tags: pointwise
- func: bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_left_shift_out
tags: pointwise
- func: bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_left_shift
autogen: bitwise_left_shift.Scalar_Tensor_out
tags: pointwise
- func: __rshift__.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CPU, CUDA: __rshift__
tags: pointwise
- func: __rshift__.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CPU, CUDA: __rshift__
tags: pointwise
- func: __irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CPU, CUDA: __irshift__
autogen: __rshift__.Scalar_out
- func: __irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CPU, CUDA: __irshift__
autogen: __rshift__.Tensor_out
- func: bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function, method
structured_delegate: bitwise_right_shift.Tensor_out
tags: pointwise
- func: bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: bitwise_right_shift.Tensor_out
tags: pointwise
- func: bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: bitwise_right_shift_out
tags: pointwise
- func: bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CompositeExplicitAutograd: bitwise_right_shift
tags: pointwise
- func: bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: bitwise_right_shift_
tags: pointwise
- func: bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_right_shift_out
tags: pointwise
- func: bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CompositeExplicitAutograd: bitwise_right_shift
autogen: bitwise_right_shift.Scalar_Tensor_out
tags: pointwise
- func: tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
structured_delegate: tril.out
variants: method
- func: triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
structured_delegate: triu.out
variants: method
- func: digamma_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: digamma.out
variants: method
tags: pointwise
- func: lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: lerp.Scalar_out
tags: pointwise
- func: lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: lerp.Tensor_out
tags: pointwise
- func: addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
variants: method
dispatch:
CPU, CUDA: addbmm_
MPS: addbmm_mps_
- func: addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: addbmm_out
MPS: addbmm_out_mps
- func: addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
variants: method, function
dispatch:
CPU, CUDA: addbmm
MPS: addbmm_mps
- func: random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
tags: nondeterministic_seeded
dispatch:
CPU, CUDA: random_
Meta: random_meta_
MPS: random_mps_
autogen: random.from, random.from_out
- func: random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
tags: nondeterministic_seeded
variants: method
dispatch:
CPU, CUDA: random_
Meta: random_meta_
MPS: random_mps_
autogen: random.to, random.to_out
- func: random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
tags: nondeterministic_seeded
variants: method
dispatch:
CPU, CUDA: random_
MPS: random_mps_
Meta: random_meta_
autogen: random, random.out
- func: uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
tags: nondeterministic_seeded
variants: method
dispatch:
CPU, CUDA: uniform_
MPS: uniform_mps_
Meta: uniform_meta_
autogen: uniform, uniform.out
- func: cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
tags: nondeterministic_seeded
dispatch:
CPU, CUDA: cauchy_
autogen: cauchy, cauchy.out
- func: log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
tags: nondeterministic_seeded
variants: method
dispatch:
CPU, CUDA: log_normal_
autogen: log_normal, log_normal.out
- func: exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
tags: nondeterministic_seeded
variants: method
dispatch:
CPU, CUDA: exponential_
MPS: exponential_mps_
autogen: exponential, exponential.out
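# Illustrative usage of the in-place sampling methods above (hedged example):
#
#   t = torch.empty(3)
#   t.uniform_(0, 1)          # fills with samples from U(0, 1)
#   t.exponential_(lambd=2.)  # fills with samples from Exp(2)
#   torch.empty(3, dtype=torch.long).random_(0, 10)   # integers in [0, 10)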
- func: geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
tags: nondeterministic_seeded
variants: method
dispatch:
CPU, CUDA: geometric_
autogen: geometric, geometric.out
# wrappers for TH functions
- func: diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
- func: diag(Tensor self, int diagonal=0) -> Tensor
variants: method, function
- func: cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
- func: cross(Tensor self, Tensor other, int? dim=None) -> Tensor
variants: method, function
- func: triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU: triu_cpu
CUDA: triu_cuda
MPS: triu_mps_out
- func: triu(Tensor self, int diagonal=0) -> Tensor
structured_delegate: triu.out
variants: method, function
- func: tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU: tril_cpu
CUDA: tril_cuda
MPS: tril_mps_out
- func: tril(Tensor self, int diagonal=0) -> Tensor
structured_delegate: tril.out
variants: method, function
- func: tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CPU: tril_indices_cpu
CUDA: tril_indices_cuda
autogen: tril_indices.out
- func: triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CPU: triu_indices_cpu
CUDA: triu_indices_cuda
autogen: triu_indices.out
- func: trace(Tensor self) -> Tensor
variants: method, function
dispatch:
CPU: trace_cpu
CUDA: trace_cuda
MPS: trace_mps
autogen: trace.out
- func: trace_backward(Tensor grad, SymInt[] sizes) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
dispatch:
CompositeImplicitAutograd: trace_backward_symint
- func: ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: ne_Scalar_out
MPS: ne_scalar_out_mps
QuantizedCPU: ne_out_quantized_cpu
tags: pointwise
- func: ne.Scalar(Tensor self, Scalar other) -> Tensor
structured_delegate: ne.Scalar_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: ne_quantized_cpu
tags: [core, pointwise]
- func: ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: ne_Tensor_out
MPS: ne_tensor_out_mps
QuantizedCPU: ne_out_quantized_cpu
tags: pointwise
- func: ne.Tensor(Tensor self, Tensor other) -> Tensor
structured_delegate: ne.Tensor_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: ne_quantized_cpu
tags: [core, pointwise]
- func: ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
structured_delegate: ne.Scalar_out
device_check: NoCheck # TensorIterator
variants: method
- func: ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: ne.Tensor_out
device_check: NoCheck # TensorIterator
variants: method
# not_equal, alias for torch.ne
- func: not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- func: not_equal.Scalar(Tensor self, Scalar other) -> Tensor
variants: method, function
- func: not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- func: not_equal.Tensor(Tensor self, Tensor other) -> Tensor
variants: method, function
- func: not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
variants: method
- func: not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
variants: method
- func: eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: eq_Scalar_out
MPS: eq_scalar_out_mps
QuantizedCPU: eq_out_quantized_cpu
tags: pointwise
- func: eq.Scalar(Tensor self, Scalar other) -> Tensor
structured_delegate: eq.Scalar_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: eq_quantized_cpu
NestedTensorCPU, NestedTensorCUDA: eq_scalar_nested
tags: [core, pointwise]
- func: eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: eq_Tensor_out
MPS: eq_tensor_out_mps
QuantizedCPU: eq_out_quantized_cpu
tags: pointwise
- func: eq.Tensor(Tensor self, Tensor other) -> Tensor
structured_delegate: eq.Tensor_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: eq_quantized_cpu
tags: [core, pointwise]
- func: ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: ge_Scalar_out
MPS: ge_scalar_out_mps
QuantizedCPU: ge_out_quantized_cpu
tags: pointwise
- func: ge.Scalar(Tensor self, Scalar other) -> Tensor
structured_delegate: ge.Scalar_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: ge_quantized_cpu
NestedTensorCPU, NestedTensorCUDA: ge_scalar_nested
tags: [core, pointwise]
- func: ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: ge_Tensor_out
MPS: ge_tensor_out_mps
QuantizedCPU: ge_out_quantized_cpu
tags: pointwise
- func: ge.Tensor(Tensor self, Tensor other) -> Tensor
structured_delegate: ge.Tensor_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: ge_quantized_cpu
tags: [core, pointwise]
- func: ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
structured_delegate: ge.Scalar_out
device_check: NoCheck # TensorIterator
variants: method
- func: ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: ge.Tensor_out
device_check: NoCheck # TensorIterator
variants: method
# greater_equal, alias for torch.ge
- func: greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- func: greater_equal.Scalar(Tensor self, Scalar other) -> Tensor
variants: method, function
- func: greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- func: greater_equal.Tensor(Tensor self, Tensor other) -> Tensor
variants: method, function
- func: greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
variants: method
- func: greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
variants: method
- func: le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: le_Scalar_out
MPS: le_scalar_out_mps
QuantizedCPU: le_out_quantized_cpu
tags: pointwise
- func: le.Scalar(Tensor self, Scalar other) -> Tensor
structured_delegate: le.Scalar_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: le_quantized_cpu
tags: [core, pointwise]
- func: le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: le_Tensor_out
MPS: le_tensor_out_mps
QuantizedCPU: le_out_quantized_cpu
tags: pointwise
- func: le.Tensor(Tensor self, Tensor other) -> Tensor
structured_delegate: le.Tensor_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: le_quantized_cpu
tags: [core, pointwise]
- func: le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
structured_delegate: le.Scalar_out
device_check: NoCheck # TensorIterator
variants: method
- func: le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: le.Tensor_out
device_check: NoCheck # TensorIterator
variants: method
# less_equal, alias for torch.le
- func: less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- func: less_equal.Scalar(Tensor self, Scalar other) -> Tensor
variants: method, function
- func: less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- func: less_equal.Tensor(Tensor self, Tensor other) -> Tensor
variants: method, function
- func: less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
variants: method
- func: less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
variants: method
- func: gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: gt_Scalar_out
MPS: gt_scalar_out_mps
QuantizedCPU: gt_out_quantized_cpu
tags: pointwise
- func: gt.Scalar(Tensor self, Scalar other) -> Tensor
structured_delegate: gt.Scalar_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: gt_quantized_cpu
NestedTensorCPU, NestedTensorCUDA: gt_scalar_nested
tags: [core, pointwise]
- func: gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: gt_Tensor_out
MPS: gt_tensor_out_mps
QuantizedCPU: gt_out_quantized_cpu
tags: pointwise
- func: gt.Tensor(Tensor self, Tensor other) -> Tensor
structured_delegate: gt.Tensor_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: gt_quantized_cpu
tags: [core, pointwise]
- func: gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
structured_delegate: gt.Scalar_out
device_check: NoCheck # TensorIterator
variants: method
- func: gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: gt.Tensor_out
device_check: NoCheck # TensorIterator
variants: method
# greater, alias for torch.gt
- func: greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- func: greater.Scalar(Tensor self, Scalar other) -> Tensor
variants: method, function
- func: greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- func: greater.Tensor(Tensor self, Tensor other) -> Tensor
variants: method, function
- func: greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
variants: method
- func: greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
variants: method
- func: lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: lt_Scalar_out
MPS: lt_scalar_out_mps
QuantizedCPU: lt_out_quantized_cpu
tags: pointwise
- func: lt.Scalar(Tensor self, Scalar other) -> Tensor
structured_delegate: lt.Scalar_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: lt_quantized_cpu
tags: [core, pointwise]
- func: lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: lt_Tensor_out
MPS: lt_tensor_out_mps
QuantizedCPU: lt_out_quantized_cpu
tags: pointwise
- func: lt.Tensor(Tensor self, Tensor other) -> Tensor
structured_delegate: lt.Tensor_out
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
QuantizedCPU: lt_quantized_cpu
tags: [core, pointwise]
- func: lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
structured_delegate: lt.Scalar_out
device_check: NoCheck # TensorIterator
variants: method
- func: lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: lt.Tensor_out
device_check: NoCheck # TensorIterator
variants: method
# less, alias for torch.lt
- func: less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- func: less.Scalar(Tensor self, Scalar other) -> Tensor
variants: method, function
- func: less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- func: less.Tensor(Tensor self, Tensor other) -> Tensor
variants: method, function
- func: less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
variants: method
- func: less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
variants: method
- func: take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: take_out
- func: take(Tensor self, Tensor index) -> Tensor
variants: method, function
dispatch:
CPU, CUDA: take
- func: take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
- func: take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor
variants: method, function
- func: index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, QuantizedCPU: index_select_out_cpu_
CUDA, QuantizedCUDA: index_select_out_cuda
MPS: index_select_out_mps
- func: index_select(Tensor self, int dim, Tensor index) -> Tensor
variants: method, function
dispatch:
CPU: index_select_cpu_
QuantizedCPU: index_select_quantized_cpu_
CUDA: index_select_cuda
QuantizedCUDA: index_select_quantized_cuda
SparseCPU: index_select_sparse_cpu
SparseCUDA: index_select_sparse_cuda
MPS: index_select_mps
tags: core
- func: index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
- func: index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor
variants: method, function
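# Illustrative usage (hedged example):
#
#   x = torch.arange(12.).reshape(4, 3)
#   torch.index_select(x, 0, torch.tensor([0, 2]))   # selects rows 0 and 2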
- func: index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
dispatch:
CompositeImplicitAutograd: index_select_backward_symint
- func: masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU: masked_select_out_cpu
CUDA: masked_select_out_cuda
MPS: masked_select_out_mps
tags: dynamic_output_shape
- func: masked_select(Tensor self, Tensor mask) -> Tensor
variants: method, function
dispatch:
CPU: masked_select_cpu
CUDA: masked_select_cuda
MPS: masked_select_mps
tags: dynamic_output_shape
- func: masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
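# Illustrative usage (hedged example): the result is 1-D, hence the
# dynamic_output_shape tag above.
#
#   x = torch.tensor([[-1., 2.], [3., -4.]])
#   torch.masked_select(x, x > 0)   # -> tensor([2., 3.])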
- func: nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU: nonzero_out_cpu
CUDA: nonzero_out_cuda
MPS: nonzero_out_mps
tags: dynamic_output_shape
- func: nonzero(Tensor self) -> Tensor
variants: method, function
dispatch:
CPU: nonzero_cpu
CUDA: nonzero_cuda
MPS: nonzero_mps
tags: [dynamic_output_shape, core]
- func: nonzero_static.out(Tensor self, *, int size, int fill_value=-1, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU: nonzero_static_out_cpu
- func: nonzero_static(Tensor self, *, int size, int fill_value=-1) -> Tensor
variants: method, function
dispatch:
CPU: nonzero_static_cpu
- func: nonzero_numpy(Tensor self) -> Tensor[]
variants: method, function
- func: argwhere(Tensor self) -> Tensor
variants: method, function
tags: dynamic_output_shape
- func: gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU, CUDA: gather_out
MPS: gather_out_mps
- func: gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor
variants: method, function
structured_delegate: gather.out
tags: core
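# Illustrative usage (hedged example): along dim=1, out[i][j] = self[i][index[i][j]].
#
#   t = torch.tensor([[1, 2], [3, 4]])
#   idx = torch.tensor([[0, 1], [1, 0]])
#   torch.gather(t, 1, idx)   # -> tensor([[1, 2], [4, 3]])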
- func: gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor
variants: function
device_check: NoCheck
device_guard: False
- func: gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
- func: gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor
variants: method, function
- func: _gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor
- func: addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: addcmul_out
MPS: addcmul_out_mps
tags: pointwise
- func: addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
structured_delegate: addcmul.out
device_check: NoCheck # TensorIterator
variants: method, function
tags: pointwise
- func: addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
structured_delegate: addcmul.out
device_check: NoCheck # TensorIterator
variants: method
tags: pointwise
- func: addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: addcdiv_out
MPS: addcdiv_out_mps
tags: pointwise
- func: addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
structured_delegate: addcdiv.out
device_check: NoCheck # TensorIterator
variants: method, function
tags: pointwise
- func: addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
structured_delegate: addcdiv.out
device_check: NoCheck # TensorIterator
variants: method
tags: pointwise
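# Illustrative usage (hedged example): addcmul computes self + value * tensor1 * tensor2,
# addcdiv computes self + value * tensor1 / tensor2 (elementwise).
#
#   a = torch.ones(3)
#   t1 = torch.tensor([1., 2., 3.])
#   t2 = torch.tensor([2., 2., 2.])
#   torch.addcmul(a, t1, t2, value=0.5)   # -> tensor([2., 3., 4.])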
- func: cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor
python_module: nn
dispatch:
CompositeImplicitAutograd: cross_entropy_loss_symint
- func: triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)
structured: True
dispatch:
CPU, CUDA: triangular_solve_out
MPS: triangular_solve_mps_out
SparseCsrCPU: triangular_solve_out_sparse_csr_cpu
SparseCsrCUDA: triangular_solve_out_sparse_csr_cuda
- func: triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)
structured_delegate: triangular_solve.X
variants: method, function
- func: _linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> ()
dispatch:
CompositeExplicitAutograd: _linalg_check_errors
- func: linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
dispatch:
CPU, CUDA: linalg_solve_triangular_out
MPS: linalg_solve_triangular_mps_out
- func: linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor
python_module: linalg
variants: function
dispatch:
CPU, CUDA: linalg_solve_triangular
MPS: linalg_solve_triangular_mps
- func: linalg_vander(Tensor x, *, SymInt? N=None) -> Tensor
python_module: linalg
dispatch:
CompositeImplicitAutograd: linalg_vander_symint
- func: svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)
- func: svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)
variants: method, function
# swapaxes, alias for transpose
- func: swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
- func: swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)
variants: method
device_check: NoCheck
device_guard: False
tags: inplace_view
# swapdims, alias for transpose
- func: swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
variants: function, method
device_check: NoCheck
device_guard: False
- func: swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
variants: method
device_check: NoCheck
device_guard: False
tags: inplace_view
- func: cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: cholesky_out
- func: cholesky(Tensor self, bool upper=False) -> Tensor
variants: method, function
dispatch:
CPU, CUDA: cholesky
- func: cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: cholesky_solve_out
- func: cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor
variants: method, function
dispatch:
CompositeExplicitAutograd: cholesky_solve
- func: _cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor
variants: function
dispatch:
CPU: _cholesky_solve_helper_cpu
CUDA: _cholesky_solve_helper_cuda
autogen: _cholesky_solve_helper.out
- func: cholesky_inverse(Tensor self, bool upper=False) -> Tensor
variants: method, function
dispatch:
CPU, CUDA: cholesky_inverse
- func: cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: cholesky_inverse_out
- func: qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
- func: qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)
variants: method, function
- func: geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)
dispatch:
CPU, CUDA: geqrf_out
- func: geqrf(Tensor self) -> (Tensor a, Tensor tau)
variants: method, function
dispatch:
CPU, CUDA: geqrf
# orgqr, alias for linalg_householder_product
- func: orgqr(Tensor self, Tensor input2) -> Tensor
variants: method, function
- func: orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)
- func: ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: ormqr_out
- func: ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor
variants: method, function
dispatch:
CPU, CUDA: ormqr
- func: _lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info)
variants: function
- func: lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)
- func: lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor
variants: method, function
# lu_unpack
- func: lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U)
structured_delegate: lu_unpack.out
variants: function
- func: lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
variants: function
structured: True
dispatch:
CPU, CUDA: lu_unpack_out
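# Illustrative usage (hedged example), pairing lu_unpack with torch.linalg.lu_factor:
#
#   A = torch.randn(3, 3)
#   LU, pivots = torch.linalg.lu_factor(A)
#   P, L, U = torch.lu_unpack(LU, pivots)   # P @ L @ U reconstructs A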
# TODO: remove dispatch section when porting TH CUDA to ATen
- func: multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
dispatch:
CPU, CUDA: multinomial_out
MPS: multinomial_out_mps
- func: multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor
variants: method, function
dispatch:
CPU, CUDA: multinomial
MPS: multinomial_mps
tags: nondeterministic_seeded
- func: lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: lgamma_out
MPS: lgamma_out_mps
tags: pointwise
- func: lgamma_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: lgamma.out
variants: method
tags: pointwise
- func: lgamma(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: lgamma.out
variants: method, function
tags: pointwise
- func: digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: digamma_out
MPS: digamma_out_mps
tags: pointwise
- func: digamma(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: digamma.out
variants: method, function
tags: pointwise
- func: polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: polygamma_out
MPS: polygamma_out_mps
tags: pointwise
- func: polygamma(int n, Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: polygamma.out
variants: method, function
tags: pointwise
- func: polygamma_(Tensor(a!) self, int n) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: polygamma_
tags: pointwise
- func: erfinv(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: erfinv.out
variants: method, function
dispatch:
SparseCPU, SparseCUDA: erfinv_sparse
SparseCsrCPU, SparseCsrCUDA: erfinv_sparse_csr
tags: pointwise
- func: erfinv_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: erfinv.out
variants: method
dispatch:
SparseCPU, SparseCUDA: erfinv_sparse_
SparseCsrCPU, SparseCsrCUDA: erfinv_sparse_csr_
tags: pointwise
- func: erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: erfinv_out
MPS: erfinv_out_mps
SparseCPU, SparseCUDA: erfinv_sparse_out
SparseCsrCPU, SparseCsrCUDA: erfinv_sparse_csr_out
tags: pointwise
- func: i0(Tensor self) -> Tensor
structured_delegate: i0.out
variants: function, method
tags: pointwise
- func: i0_(Tensor(a!) self) -> Tensor(a!)
structured_delegate: i0.out
variants: function, method
tags: pointwise
- func: i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: i0_out
tags: pointwise
- func: sign(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: sign.out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: sign_sparse
SparseCsrCPU, SparseCsrCUDA: sign_sparse_csr
tags: [core, pointwise]
- func: sign_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: sign.out
variants: method
dispatch:
SparseCPU, SparseCUDA: sign_sparse_
SparseCsrCPU, SparseCsrCUDA: sign_sparse_csr_
tags: pointwise
- func: sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: sign_out
MPS: sign_out_mps
SparseCPU, SparseCUDA: sign_sparse_out
SparseCsrCPU, SparseCsrCUDA: sign_sparse_csr_out
tags: pointwise
- func: signbit(Tensor self) -> Tensor
variants: function, method
structured_delegate: signbit.out
dispatch:
SparseCPU, SparseCUDA: signbit_sparse
SparseCsrCPU, SparseCsrCUDA: signbit_sparse_csr
tags: pointwise
- func: signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU: signbit_out
CUDA: signbit_out
MPS: signbit_out_mps
SparseCPU, SparseCUDA: signbit_sparse_out
SparseCsrCPU, SparseCsrCUDA: signbit_sparse_csr_out
tags: pointwise
- func: dist(Tensor self, Tensor other, Scalar p=2) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CompositeExplicitAutograd: dist
autogen: dist.out
- func: atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: atan2_out
MPS: atan2_out_mps
tags: [core, pointwise]
- func: atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: atan2.out
variants: method
tags: pointwise
- func: atan2(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: atan2.out
variants: method, function
tags: [core, pointwise]
# arctan2, alias of atan2
- func: arctan2(Tensor self, Tensor other) -> Tensor
variants: method, function
- func: arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
- func: arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
variants: method
- func: lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: lerp_Scalar
MPS: lerp_Scalar_mps
tags: pointwise
- func: lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: lerp_Tensor
MPS: lerp_Tensor_mps
tags: pointwise
- func: lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
structured_delegate: lerp.Scalar_out
tags: pointwise
- func: lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
structured_delegate: lerp.Tensor_out
tags: pointwise
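# Illustrative usage (hedged example): lerp computes self + weight * (end - self).
#
#   start = torch.zeros(3)
#   end = torch.tensor([10., 10., 10.])
#   torch.lerp(start, end, 0.5)   # -> tensor([5., 5., 5.])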
- func: histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, MPS: histogram_histc_out
CUDA: _histc_out_cuda
- func: histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor
variants: method, function
dispatch:
CPU, MPS: histogram_histc
CUDA: _histc_cuda
- func: histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
dispatch:
CPU, MPS: histogram_out
- func: histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
variants: method, function
dispatch:
CPU, MPS: histogram
- func: histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
dispatch:
CPU, MPS: histogram_out
- func: histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
variants: method, function
dispatch:
CPU, MPS: histogram
- func: _histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[]
dispatch:
CPU, MPS: histogramdd_bin_edges
autogen: _histogramdd_bin_edges.out
- func: _histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor
dispatch:
CPU, MPS: _histogramdd
autogen: _histogramdd_from_bin_cts.out
- func: _histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor
dispatch:
CPU, MPS: _histogramdd
autogen: _histogramdd_from_bin_tensors.out
- func: histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
- func: histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
- func: histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
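# Illustrative usage (hedged example):
#
#   x = torch.randn(100)
#   hist, bin_edges = torch.histogram(x, bins=5)
#   # hist holds 5 bin counts; bin_edges holds the 6 bin boundaries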
- func: fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CompositeExplicitAutograd: fmod_out
tags: pointwise
- func: fmod.Scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CompositeExplicitAutograd: fmod
tags: [core, pointwise]
- func: fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
dispatch:
CompositeExplicitAutograd: fmod_
tags: pointwise
- func: fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: fmod_out
MPS: fmod_mps_out
tags: pointwise
- func: fmod.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: fmod.Tensor_out
variants: method, function
tags: [core, pointwise]
- func: fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: fmod.Tensor_out
tags: pointwise
- func: hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: hypot_out
MPS: hypot_out_mps
tags: pointwise
- func: hypot(Tensor self, Tensor other) -> Tensor
structured_delegate: hypot.out
variants: method, function
tags: pointwise
- func: hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: hypot.out
variants: method
tags: pointwise
- func: igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: igamma_out
tags: pointwise
- func: igamma(Tensor self, Tensor other) -> Tensor
structured_delegate: igamma.out
variants: method, function
tags: pointwise
- func: igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: igamma.out
variants: method
tags: pointwise
- func: igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: igammac_out
tags: pointwise
- func: igammac(Tensor self, Tensor other) -> Tensor
structured_delegate: igammac.out
variants: method, function
tags: pointwise
- func: igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: igammac.out
variants: method
tags: pointwise
- func: nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA, MPS: nextafter_out
tags: pointwise
- func: nextafter(Tensor self, Tensor other) -> Tensor
structured_delegate: nextafter.out
variants: method, function
tags: pointwise
- func: nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!)
structured_delegate: nextafter.out
variants: method
tags: pointwise
- func: remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: remainder_out
tags: pointwise
- func: remainder.Scalar(Tensor self, Scalar other) -> Tensor
variants: method, function
dispatch:
CompositeExplicitAutograd: remainder
tags: [core, pointwise]
- func: remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
variants: method
dispatch:
CompositeExplicitAutograd: remainder_
tags: pointwise
- func: remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: remainder_out
MPS: remainder_out_mps
tags: pointwise
- func: remainder.Tensor(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: remainder.Tensor_out
variants: method, function
tags: [core, pointwise]
- func: remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: remainder.Tensor_out
variants: method
tags: pointwise
- func: remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: function
dispatch:
CPU, CUDA, MPS: remainder
autogen: remainder.Scalar_Tensor_out
tags: pointwise
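# fmod follows C's fmod (the result takes the sign of self), while remainder follows
# Python's % (the result takes the sign of other). A minimal illustration (standard
# torch semantics assumed):
#
#   a = torch.tensor([-3., 3.])
#   torch.fmod(a, 2.0)       # -> tensor([-1.,  1.])
#   torch.remainder(a, 2.0)  # -> tensor([ 1.,  1.])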
- func: min(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CPU, CUDA: min
MPS: min_mps
QuantizedCPU: min_quantized_cpu
- func: min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: min_unary_out
QuantizedCPU: min_quantized_unary_out
- func: fmin(Tensor self, Tensor other) -> Tensor
structured_delegate: fmin.out
device_check: NoCheck # TensorIterator
variants: method, function
tags: pointwise
- func: fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA, MPS: fmin_out
tags: pointwise
- func: max(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CPU, CUDA: max
MPS: max_mps
QuantizedCPU: max_quantized_cpu
- func: fmax(Tensor self, Tensor other) -> Tensor
structured_delegate: fmax.out
device_check: NoCheck # TensorIterator
variants: method, function
tags: pointwise
- func: fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA, MPS: fmax_out
tags: pointwise
- func: maximum(Tensor self, Tensor other) -> Tensor
structured_delegate: maximum.out
device_check: NoCheck # TensorIterator
variants: method, function
tags: [core, pointwise]
- func: maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: maximum_out
MPS: maximum_out_mps
tags: pointwise
# binary max: behaves like maximum, but is a separate op
# NOTE: max cannot be registered as an alias of maximum, since there is also a unary max
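# For illustration (hypothetical tensors a and b, standard torch semantics assumed):
#   torch.max(a)     # unary reduction         -> max(Tensor self)
#   torch.max(a, b)  # elementwise binary max  -> max.other, same result as torch.maximum(a, b)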
- func: max.other(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
tags: pointwise
- func: max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
tags: pointwise
- func: max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: max_unary_out
QuantizedCPU: max_quantized_unary_out
- func: minimum(Tensor self, Tensor other) -> Tensor
structured_delegate: minimum.out
device_check: NoCheck # TensorIterator
variants: method, function
tags: [core, pointwise]
- func: minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA: minimum_out
MPS: minimum_out_mps
tags: pointwise
# binary min: behaves like minimum, but is a separate op
# NOTE: min cannot be registered as an alias of minimum, since there is also a unary min
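# Analogous to max above (illustrative only):
#   torch.min(a)     # unary reduction         -> min(Tensor self)
#   torch.min(a, b)  # elementwise binary min  -> min.other, same result as torch.minimum(a, b)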
- func: min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
tags: pointwise
- func: min.other(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
tags: pointwise
- func: quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
variants: method, function
- func: quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
- func: quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
variants: method, function
- func: quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
- func: nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
variants: method, function
- func: nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
- func: nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
variants: method, function
- func: nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
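# Usage sketch for the quantile family above (illustrative; standard torch semantics assumed):
#
#   x = torch.randn(100)
#   torch.quantile(x, 0.5)                          # scalar q -> quantile.scalar
#   torch.quantile(x, torch.tensor([0.25, 0.75]))   # tensor q -> quantile
#   torch.nanquantile(x, 0.5)                       # like quantile, but NaNs are ignored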
- func: sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
device_check: NoCheck # TensorIterator
dispatch:
CompositeExplicitAutograd: sort_out
- func: sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
structured: True
dispatch:
CPU, CUDA: sort_stable_out
MPS: sort_stable_out_mps
- func: sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CompositeExplicitAutograd: sort
tags: core
- func: sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
structured_delegate: sort.values_stable
variants: method, function
dispatch:
QuantizedCPU: sort_quantized_cpu_stable
- func: sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- func: sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- func: sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
variants: method, function
- func: sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
variants: method, function
- func: msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: msort(Tensor self) -> Tensor
variants: method, function
- func: argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
- func: argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
dispatch:
CPU, CUDA, MPS: argsort_stable
autogen: argsort.stable_out
- func: argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor
variants: method, function
- func: topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
structured: True
dispatch:
CPU: topk_out_cpu
CUDA: topk_out_cuda
MPS: topk_out_mps
- func: topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)
variants: method, function
structured_delegate: topk.values
dispatch:
QuantizedCPU: topk_quantized_cpu
tags: core
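# Usage sketch for sort/argsort/topk (illustrative; standard torch semantics assumed):
#
#   x = torch.randn(4, 8)
#   values, indices = torch.sort(x, dim=-1, descending=True, stable=True)
#   order = torch.argsort(x, dim=-1)        # indices only
#   top_vals, top_idx = torch.topk(x, k=3)  # 3 largest values along the last dim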
- func: all(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: all.all_out
variants: method, function
- func: all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
structured: True
dispatch:
CPU, CUDA: all_all_out
MPS: all_all_out_mps
- func: any(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: any.all_out
variants: method, function
dispatch:
SparseCPU, SparseCUDA: any_sparse
tags: core
- func: any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
structured: True
dispatch:
CPU, CUDA: any_all_out
MPS: any_all_out_mps
- func: renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
dispatch:
CPU, CUDA: renorm_out
MPS: renorm_out_mps
- func: renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor
device_check: NoCheck # TensorIterator
variants: method, function
structured_delegate: renorm.out
- func: renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
structured_delegate: renorm.out
- func: unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)
variants: method
device_check: NoCheck
device_guard: False
dispatch:
CPU, CUDA, Meta, MPS: unfold
QuantizedCPU, QuantizedCUDA: unfold
- func: unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor
variants: function
dispatch:
CPU, CUDA: unfold_backward
autogen: unfold_backward.out
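# unfold returns a view with an extra trailing dimension of length `size`, sliding a
# window along `dimension` with stride `step`. Illustrative example (standard torch
# semantics assumed):
#
#   x = torch.arange(7)
#   x.unfold(0, 2, 1)   # shape (6, 2): windows [0,1], [1,2], ..., [5,6]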
- func: equal(Tensor self, Tensor other) -> bool
tags: [data_dependent_output, pointwise]
variants: method, function
dispatch:
CPU: cpu_equal
CUDA: cuda_equal
MPS: mps_equal
QuantizedCPU: equal_quantized_cpu
- func: pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: pow_Tensor_Tensor_out
MPS: pow_tensor_tensor_out_mps
tags: pointwise
- func: pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: pow.Tensor_Tensor_out
variants: method, function
tags: [core, pointwise]
- func: pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
dispatch:
CPU, CUDA: pow_Scalar_out
MPS: pow_Scalar_out_mps
tags: pointwise
- func: pow.Scalar(Scalar self, Tensor exponent) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: pow.Scalar_out
tags: [core, pointwise]
- func: pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: pow_Tensor_Scalar_out
SparseCPU, SparseCUDA: pow_out_sparse_scalar
MPS: pow_tensor_scalar_out_mps
tags: pointwise
- func: pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: pow.Tensor_Scalar_out
variants: function, method
dispatch:
SparseCPU, SparseCUDA: pow_sparse_scalar
tags: [core, pointwise]
- func: pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: pow.Tensor_Scalar_out
variants: method
tags: pointwise
- func: pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured_delegate: pow.Tensor_Tensor_out
variants: method
tags: pointwise
- func: float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
tags: pointwise
- func: float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
variants: function, method
tags: pointwise
- func: float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
tags: pointwise
- func: float_power.Scalar(Scalar self, Tensor exponent) -> Tensor
tags: pointwise
- func: float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
tags: pointwise
- func: float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
variants: function, method
tags: pointwise
- func: float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
variants: method
tags: pointwise
- func: float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
variants: method
tags: pointwise
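# pow computes in the promoted dtype of its inputs, while float_power always promotes
# to double (or cdouble for complex inputs). A minimal illustration (assumed standard
# torch behavior):
#
#   torch.pow(torch.tensor([2]), 3).dtype          # torch.int64
#   torch.float_power(torch.tensor([2]), 3).dtype  # torch.float64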
- func: normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
tags: nondeterministic_seeded
variants: method
dispatch:
CPU, CUDA: normal_
MPS: normal_mps_
Meta: normal_meta_
SparseCsrCPU, SparseCsrCUDA: normal_sparse_csr_
NestedTensorCPU, NestedTensorCUDA: normal_nested_
autogen: normal.out
# Only used by the functionalization pass.
# Normally, the codegen would be able to generate a normal() NativeFunction,
# but we can't due to overload ambiguity with normal.Tensor_float.
- func: normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor
device_check: NoCheck # TensorIterator
tags: nondeterministic_seeded
dispatch:
CompositeExplicitAutograd: normal_functional
- func: normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
tags: nondeterministic_seeded
dispatch:
CPU, CUDA: normal_out
MPS: normal_mps_out
Meta: normal_out_meta
- func: normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor
dispatch:
CPU, CUDA: normal
MPS: normal_mps
Meta: normal_meta
tags: nondeterministic_seeded
- func: normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: normal_out
Meta: normal_out_meta
MPS: normal_mps_out
tags: nondeterministic_seeded
- func: normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor
dispatch:
CPU, CUDA: normal
MPS: normal_mps
Meta: normal_meta
tags: nondeterministic_seeded
- func: normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: normal_out
Meta: normal_out_meta
MPS: normal_mps_out
tags: nondeterministic_seeded
- func: normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor
dispatch:
CPU, CUDA: normal
MPS: normal_mps
Meta: normal_meta
tags: nondeterministic_seeded
- func: normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
dispatch:
CompositeExplicitAutograd: normal
tags: nondeterministic_seeded
- func: normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: normal_out
tags: nondeterministic_seeded
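# The normal overloads above cover every mean/std combination: float/float (which needs
# an explicit size), Tensor/float, float/Tensor and Tensor/Tensor. Illustrative usage
# (standard torch semantics assumed):
#
#   torch.normal(0.0, 1.0, size=(3,))            # normal.float_float
#   torch.normal(torch.zeros(3), 1.0)            # normal.Tensor_float
#   torch.normal(torch.zeros(3), torch.ones(3))  # normal.Tensor_Tensor
#   torch.empty(3).normal_(mean=0.0, std=1.0)    # in-place normal_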
- func: alias(Tensor(a) self) -> Tensor(a)
variants: method, function
dispatch:
CompositeExplicitAutograd: alias
tags: core
- func: _amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()
variants: function
dispatch:
CUDA: _amp_foreach_non_finite_check_and_unscale_cuda_
CPU: _amp_foreach_non_finite_check_and_unscale_cpu_
autogen: _amp_foreach_non_finite_check_and_unscale, _amp_foreach_non_finite_check_and_unscale.out
- func: _amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!)
variants: function
dispatch:
CUDA: _amp_update_scale_cuda_
CPU: _amp_update_scale_cpu_
autogen: _amp_update_scale, _amp_update_scale.out
#- func: _cat(Tensor[] tensors, int dim=0) -> Tensor
#dispatch:
#CPU: _cat_cpu
#CUDA: cat_cuda
#MPS: cat_mps
#QuantizedCPU: cat_quantized_cpu
#- func: _cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
#dispatch:
#CPU: _cat_out_cpu
#CUDA: cat_out_cuda
#QuantizedCPU: cat_out_quantized_cpu
- func: _foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_add_scalar_kernel_slow
CUDA: foreach_tensor_add_scalar_kernel_cuda
- func: _foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_add_scalar_kernel_slow_
CUDA: foreach_tensor_add_scalar_kernel_cuda_
autogen: _foreach_add.Scalar_out
- func: _foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_add_list_kernel_slow
CUDA: foreach_tensor_add_list_kernel_cuda
- func: _foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_add_list_kernel_slow_
CUDA: foreach_tensor_add_list_kernel_cuda_
autogen: _foreach_add.List_out
- func: _foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_add_scalarlist_kernel_slow
CUDA: foreach_tensor_add_scalarlist_kernel_cuda
- func: _foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_add_scalarlist_kernel_slow_
CUDA: foreach_tensor_add_scalarlist_kernel_cuda_
autogen: _foreach_add.ScalarList_out
- func: _foreach_add.Tensor(Tensor[] self, Tensor other, *, Scalar alpha=1) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_add_tensor_kernel_slow
CUDA: foreach_tensor_add_tensor_kernel_cuda
- func: _foreach_add_.Tensor(Tensor(a!)[] self, Tensor other, *, Scalar alpha=1) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_add_tensor_kernel_slow_
CUDA: foreach_tensor_add_tensor_kernel_cuda_
autogen: _foreach_add.Tensor_out
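# The _foreach_* ops apply the same operation across a list of tensors with few fused
# kernel launches where possible, falling back to a slow per-tensor loop otherwise.
# Illustrative usage of the add family above (private API, subject to change):
#
#   xs = [torch.ones(2), torch.ones(3)]
#   ys = [torch.full((2,), 2.0), torch.full((3,), 2.0)]
#   torch._foreach_add(xs, ys, alpha=0.5)  # List overload -> [tensor([2., 2.]), tensor([2., 2., 2.])]
#   torch._foreach_add_(xs, 1.0)           # in-place Scalar overload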
- func: _foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sub_scalar_kernel_slow
CUDA: foreach_tensor_sub_scalar_kernel_cuda
- func: _foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sub_scalar_kernel_slow_
CUDA: foreach_tensor_sub_scalar_kernel_cuda_
autogen: _foreach_sub.Scalar_out
- func: _foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sub_list_kernel_slow
CUDA: foreach_tensor_sub_list_kernel_cuda
- func: _foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sub_list_kernel_slow_
CUDA: foreach_tensor_sub_list_kernel_cuda_
autogen: _foreach_sub.List_out
- func: _foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sub_scalarlist_kernel_slow
CUDA: foreach_tensor_sub_scalarlist_kernel_cuda
- func: _foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sub_scalarlist_kernel_slow_
CUDA: foreach_tensor_sub_scalarlist_kernel_cuda_
autogen: _foreach_sub.ScalarList_out
- func: _foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_mul_scalar_kernel_slow
CUDA: foreach_tensor_mul_scalar_kernel_cuda
- func: _foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_mul_scalar_kernel_slow_
CUDA: foreach_tensor_mul_scalar_kernel_cuda_
autogen: _foreach_mul.Scalar_out
- func: _foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_mul_list_kernel_slow
CUDA: foreach_tensor_mul_list_kernel_cuda
- func: _foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_mul_list_kernel_slow_
CUDA: foreach_tensor_mul_list_kernel_cuda_
autogen: _foreach_mul.List_out
- func: _foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_mul_scalarlist_kernel_slow
CUDA: foreach_tensor_mul_scalarlist_kernel_cuda
- func: _foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_mul_scalarlist_kernel_slow_
CUDA: foreach_tensor_mul_scalarlist_kernel_cuda_
autogen: _foreach_mul.ScalarList_out
- func: _foreach_mul.Tensor(Tensor[] self, Tensor other) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_mul_tensor_kernel_slow
CUDA: foreach_tensor_mul_tensor_kernel_cuda
- func: _foreach_mul_.Tensor(Tensor(a!)[] self, Tensor other) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_mul_tensor_kernel_slow_
CUDA: foreach_tensor_mul_tensor_kernel_cuda_
autogen: _foreach_mul.Tensor_out
- func: _foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_div_scalar_kernel_slow
CUDA: foreach_tensor_div_scalar_kernel_cuda
- func: _foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_div_scalar_kernel_slow_
CUDA: foreach_tensor_div_scalar_kernel_cuda_
autogen: _foreach_div.Scalar_out
- func: _foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_div_list_kernel_slow
CUDA: foreach_tensor_div_list_kernel_cuda
- func: _foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_div_list_kernel_slow_
CUDA: foreach_tensor_div_list_kernel_cuda_
autogen: _foreach_div.List_out
- func: _foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_div_scalarlist_kernel_slow
CUDA: foreach_tensor_div_scalarlist_kernel_cuda
- func: _foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_div_scalarlist_kernel_slow_
CUDA: foreach_tensor_div_scalarlist_kernel_cuda_
autogen: _foreach_div.ScalarList_out
- func: _foreach_div.Tensor(Tensor[] self, Tensor other) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_div_tensor_kernel_slow
CUDA: foreach_tensor_div_tensor_kernel_cuda
- func: _foreach_div_.Tensor(Tensor(a!)[] self, Tensor other) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_div_tensor_kernel_slow_
CUDA: foreach_tensor_div_tensor_kernel_cuda_
autogen: _foreach_div.Tensor_out
- func: _foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_max_scalar_kernel_slow
CUDA: foreach_tensor_clamp_max_scalar_kernel_cuda
- func: _foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_max_scalar_kernel_slow_
CUDA: foreach_tensor_clamp_max_scalar_kernel_cuda_
autogen: _foreach_clamp_max.Scalar_out
- func: _foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_max_list_kernel_slow
CUDA: foreach_tensor_clamp_max_list_kernel_cuda
- func: _foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_max_list_kernel_slow_
CUDA: foreach_tensor_clamp_max_list_kernel_cuda_
autogen: _foreach_clamp_max.List_out
- func: _foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_max_scalarlist_kernel_slow
CUDA: foreach_tensor_clamp_max_scalarlist_kernel_cuda
- func: _foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_max_scalarlist_kernel_slow_
CUDA: foreach_tensor_clamp_max_scalarlist_kernel_cuda_
autogen: _foreach_clamp_max.ScalarList_out
- func: _foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_min_scalar_kernel_slow
CUDA: foreach_tensor_clamp_min_scalar_kernel_cuda
- func: _foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_min_scalar_kernel_slow_
CUDA: foreach_tensor_clamp_min_scalar_kernel_cuda_
autogen: _foreach_clamp_min.Scalar_out
- func: _foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_min_list_kernel_slow
CUDA: foreach_tensor_clamp_min_list_kernel_cuda
- func: _foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_min_list_kernel_slow_
CUDA: foreach_tensor_clamp_min_list_kernel_cuda_
autogen: _foreach_clamp_min.List_out
- func: _foreach_clamp_min.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_min_scalarlist_kernel_slow
CUDA: foreach_tensor_clamp_min_scalarlist_kernel_cuda
- func: _foreach_clamp_min_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_min_scalarlist_kernel_slow_
CUDA: foreach_tensor_clamp_min_scalarlist_kernel_cuda_
autogen: _foreach_clamp_min.ScalarList_out
# foreach_minimum dispatches to the clamp_max kernels and foreach_maximum to the clamp_min kernels
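# e.g. a scalar maximum is just a clamp_min (illustrative; these are private ops):
#   torch._foreach_maximum([torch.tensor([-1., 2.])], 0.0)  # -> [tensor([0., 2.])]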
- func: _foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_min_scalar_kernel_slow
CUDA: foreach_tensor_clamp_min_scalar_kernel_cuda
- func: _foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_min_scalar_kernel_slow_
CUDA: foreach_tensor_clamp_min_scalar_kernel_cuda_
autogen: _foreach_maximum.Scalar_out
# foreach_minimum dispatches to the clamp_max kernels and foreach_maximum to the clamp_min kernels
- func: _foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_min_list_kernel_slow
CUDA: foreach_tensor_clamp_min_list_kernel_cuda
- func: _foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_min_list_kernel_slow_
CUDA: foreach_tensor_clamp_min_list_kernel_cuda_
autogen: _foreach_maximum.List_out
# foreach_minimum dispatches to the clamp_max kernels and foreach_maximum to the clamp_min kernels
- func: _foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_min_scalarlist_kernel_slow
CUDA: foreach_tensor_clamp_min_scalarlist_kernel_cuda
- func: _foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_min_scalarlist_kernel_slow_
CUDA: foreach_tensor_clamp_min_scalarlist_kernel_cuda_
autogen: _foreach_maximum.ScalarList_out
- func: _foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_max_scalar_kernel_slow
CUDA: foreach_tensor_clamp_max_scalar_kernel_cuda
- func: _foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_max_scalar_kernel_slow_
CUDA: foreach_tensor_clamp_max_scalar_kernel_cuda_
autogen: _foreach_minimum.Scalar_out
- func: _foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_max_list_kernel_slow
CUDA: foreach_tensor_clamp_max_list_kernel_cuda
- func: _foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_max_list_kernel_slow_
CUDA: foreach_tensor_clamp_max_list_kernel_cuda_
autogen: _foreach_minimum.List_out
- func: _foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_max_scalarlist_kernel_slow
CUDA: foreach_tensor_clamp_max_scalarlist_kernel_cuda
- func: _foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_clamp_max_scalarlist_kernel_slow_
CUDA: foreach_tensor_clamp_max_scalarlist_kernel_cuda_
autogen: _foreach_minimum.ScalarList_out
- func: _foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcdiv_scalar_slow
CUDA: foreach_tensor_addcdiv_scalar_cuda
- func: _foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcdiv_scalarlist_slow
CUDA: foreach_tensor_addcdiv_scalarlist_cuda
- func: _foreach_addcdiv.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcdiv_tensor_slow
CUDA: foreach_tensor_addcdiv_tensor_cuda
- func: _foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcdiv_scalar_slow_
CUDA: foreach_tensor_addcdiv_scalar_cuda_
autogen: _foreach_addcdiv.Scalar_out
- func: _foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcdiv_scalarlist_slow_
CUDA: foreach_tensor_addcdiv_scalarlist_cuda_
autogen: _foreach_addcdiv.ScalarList_out
- func: _foreach_addcdiv_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcdiv_tensor_slow_
CUDA: foreach_tensor_addcdiv_tensor_cuda_
autogen: _foreach_addcdiv.Tensor_out
- func: _foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcmul_scalar_slow
CUDA: foreach_tensor_addcmul_scalar_cuda
- func: _foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcmul_scalarlist_slow
CUDA: foreach_tensor_addcmul_scalarlist_cuda
- func: _foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcmul_tensor_slow
CUDA: foreach_tensor_addcmul_tensor_cuda
- func: _foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcmul_scalar_slow_
CUDA: foreach_tensor_addcmul_scalar_cuda_
autogen: _foreach_addcmul.Scalar_out
- func: _foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcmul_scalarlist_slow_
CUDA: foreach_tensor_addcmul_scalarlist_cuda_
autogen: _foreach_addcmul.ScalarList_out
- func: _foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_addcmul_tensor_slow_
CUDA: foreach_tensor_addcmul_tensor_cuda_
autogen: _foreach_addcmul.Tensor_out
- func: _foreach_abs(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_abs_slow
CUDA: foreach_tensor_abs_cuda
- func: _foreach_abs_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_abs_slow_
CUDA: foreach_tensor_abs_cuda_
autogen: _foreach_abs.out
- func: _foreach_acos(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_acos_slow
CUDA: foreach_tensor_acos_cuda
- func: _foreach_acos_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_acos_slow_
CUDA: foreach_tensor_acos_cuda_
autogen: _foreach_acos.out
- func: _foreach_asin(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_asin_slow
CUDA: foreach_tensor_asin_cuda
- func: _foreach_asin_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_asin_slow_
CUDA: foreach_tensor_asin_cuda_
autogen: _foreach_asin.out
- func: _foreach_atan(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_atan_slow
CUDA: foreach_tensor_atan_cuda
- func: _foreach_atan_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_atan_slow_
CUDA: foreach_tensor_atan_cuda_
autogen: _foreach_atan.out
- func: _foreach_ceil(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_ceil_slow
CUDA: foreach_tensor_ceil_cuda
- func: _foreach_ceil_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_ceil_slow_
CUDA: foreach_tensor_ceil_cuda_
autogen: _foreach_ceil.out
- func: _foreach_cos(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_cos_slow
CUDA: foreach_tensor_cos_cuda
- func: _foreach_cos_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_cos_slow_
CUDA: foreach_tensor_cos_cuda_
autogen: _foreach_cos.out
- func: _foreach_cosh(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_cosh_slow
CUDA: foreach_tensor_cosh_cuda
- func: _foreach_cosh_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_cosh_slow_
CUDA: foreach_tensor_cosh_cuda_
autogen: _foreach_cosh.out
- func: _foreach_erf(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_erf_slow
CUDA: foreach_tensor_erf_cuda
- func: _foreach_erf_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_erf_slow_
CUDA: foreach_tensor_erf_cuda_
autogen: _foreach_erf.out
- func: _foreach_erfc(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_erfc_slow
CUDA: foreach_tensor_erfc_cuda
- func: _foreach_erfc_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_erfc_slow_
CUDA: foreach_tensor_erfc_cuda_
autogen: _foreach_erfc.out
- func: _foreach_exp(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_exp_slow
CUDA: foreach_tensor_exp_cuda
- func: _foreach_exp_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_exp_slow_
CUDA: foreach_tensor_exp_cuda_
autogen: _foreach_exp.out
- func: _foreach_expm1(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_expm1_slow
CUDA: foreach_tensor_expm1_cuda
- func: _foreach_expm1_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_expm1_slow_
CUDA: foreach_tensor_expm1_cuda_
autogen: _foreach_expm1.out
- func: _foreach_floor(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_floor_slow
CUDA: foreach_tensor_floor_cuda
- func: _foreach_floor_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_floor_slow_
CUDA: foreach_tensor_floor_cuda_
autogen: _foreach_floor.out
- func: _foreach_frac(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_frac_slow
CUDA: foreach_tensor_frac_cuda
- func: _foreach_frac_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_frac_slow_
CUDA: foreach_tensor_frac_cuda_
autogen: _foreach_frac.out
- func: _foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_ternary_lerp_slow
CUDA: foreach_tensor_lerp_ternary_cuda
autogen: _foreach_lerp.List_out
- func: _foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_ternary_lerp_slow_
CUDA: foreach_tensor_lerp_ternary_cuda_
autogen: _foreach_lerp.List_out
- func: _foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_lerp_list_kernel_slow
CUDA: foreach_tensor_lerp_list_cuda
autogen: _foreach_lerp.Scalar_out
- func: _foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_lerp_list_kernel_slow_
CUDA: foreach_tensor_lerp_list_cuda_
autogen: _foreach_lerp.Scalar_out
- func: _foreach_lgamma(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_lgamma_slow
CUDA: foreach_tensor_lgamma_cuda
- func: _foreach_lgamma_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_lgamma_slow_
CUDA: foreach_tensor_lgamma_cuda_
autogen: _foreach_lgamma.out
- func: _foreach_log(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_log_slow
CUDA: foreach_tensor_log_cuda
- func: _foreach_log_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_log_slow_
CUDA: foreach_tensor_log_cuda_
autogen: _foreach_log.out
- func: _foreach_log10(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_log10_slow
CUDA: foreach_tensor_log10_cuda
- func: _foreach_log10_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_log10_slow_
CUDA: foreach_tensor_log10_cuda_
autogen: _foreach_log10.out
- func: _foreach_log1p(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_log1p_slow
CUDA: foreach_tensor_log1p_cuda
- func: _foreach_log1p_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_log1p_slow_
CUDA: foreach_tensor_log1p_cuda_
autogen: _foreach_log1p.out
- func: _foreach_log2(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_log2_slow
CUDA: foreach_tensor_log2_cuda
- func: _foreach_log2_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_log2_slow_
CUDA: foreach_tensor_log2_cuda_
autogen: _foreach_log2.out
- func: _foreach_neg(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_neg_slow
CUDA: foreach_tensor_neg_cuda
- func: _foreach_neg_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_neg_slow_
CUDA: foreach_tensor_neg_cuda_
autogen: _foreach_neg.out
- func: _foreach_norm.Scalar(Tensor[] self, Scalar ord=2) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_norm_slow
CUDA: foreach_tensor_norm_cuda
autogen: _foreach_norm.Scalar_out
- func: _foreach_pow.List(Tensor[] self, Tensor[] exponent) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_pow_list_kernel_slow
CUDA: foreach_tensor_pow_list_kernel_cuda
- func: _foreach_pow.Scalar(Tensor[] self, Scalar exponent) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_pow_scalar_kernel_slow
CUDA: foreach_tensor_pow_scalar_kernel_cuda
- func: _foreach_pow.ScalarList(Tensor[] self, Scalar[] exponent) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_pow_scalarlist_kernel_slow
CUDA: foreach_tensor_pow_scalarlist_kernel_cuda
- func: _foreach_pow.ScalarAndTensor(Scalar self, Tensor[] exponent) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_scalar_pow_list_kernel_slow
CUDA: foreach_scalar_pow_list_kernel_cuda
- func: _foreach_pow_.List(Tensor(a!)[] self, Tensor[] exponent) -> ()
device_check: NoCheck
variants: function
dispatch:
CPU: foreach_tensor_pow_list_kernel_slow_
CUDA: foreach_tensor_pow_list_kernel_cuda_
autogen: _foreach_pow.List_out
- func: _foreach_pow_.Scalar(Tensor(a!)[] self, Scalar exponent) -> ()
device_check: NoCheck
variants: function
dispatch:
CPU: foreach_tensor_pow_scalar_kernel_slow_
CUDA: foreach_tensor_pow_scalar_kernel_cuda_
autogen: _foreach_pow.Scalar_out
- func: _foreach_pow_.ScalarList(Tensor(a!)[] self, Scalar[] exponent) -> ()
device_check: NoCheck
variants: function
dispatch:
CPU: foreach_tensor_pow_scalarlist_kernel_slow_
CUDA: foreach_tensor_pow_scalarlist_kernel_cuda_
autogen: _foreach_pow.ScalarList_out
- func: _foreach_reciprocal(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_reciprocal_slow
CUDA: foreach_tensor_reciprocal_cuda
- func: _foreach_reciprocal_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_reciprocal_slow_
CUDA: foreach_tensor_reciprocal_cuda_
autogen: _foreach_reciprocal.out
- func: _foreach_round(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_round_slow
CUDA: foreach_tensor_round_cuda
- func: _foreach_round_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_round_slow_
CUDA: foreach_tensor_round_cuda_
autogen: _foreach_round.out
- func: _foreach_sigmoid(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sigmoid_slow
CUDA: foreach_tensor_sigmoid_cuda
- func: _foreach_sigmoid_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sigmoid_slow_
CUDA: foreach_tensor_sigmoid_cuda_
autogen: _foreach_sigmoid.out
- func: _foreach_sign(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sign_slow
CUDA: foreach_tensor_sign_cuda
- func: _foreach_sign_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sign_slow_
CUDA: foreach_tensor_sign_cuda_
autogen: _foreach_sign.out
- func: _foreach_sin(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sin_slow
CUDA: foreach_tensor_sin_cuda
- func: _foreach_sin_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sin_slow_
CUDA: foreach_tensor_sin_cuda_
autogen: _foreach_sin.out
- func: _foreach_sinh(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sinh_slow
CUDA: foreach_tensor_sinh_cuda
- func: _foreach_sinh_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sinh_slow_
CUDA: foreach_tensor_sinh_cuda_
autogen: _foreach_sinh.out
- func: _foreach_sqrt(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sqrt_slow
CUDA: foreach_tensor_sqrt_cuda
- func: _foreach_sqrt_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_sqrt_slow_
CUDA: foreach_tensor_sqrt_cuda_
autogen: _foreach_sqrt.out
- func: _foreach_tan(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_tan_slow
CUDA: foreach_tensor_tan_cuda
- func: _foreach_tan_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_tan_slow_
CUDA: foreach_tensor_tan_cuda_
autogen: _foreach_tan.out
- func: _foreach_tanh(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to the slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_tanh_slow
CUDA: foreach_tensor_tanh_cuda
- func: _foreach_tanh_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to the slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_tanh_slow_
CUDA: foreach_tensor_tanh_cuda_
autogen: _foreach_tanh.out
- func: _foreach_trunc(Tensor[] self) -> Tensor[]
device_check: NoCheck # foreach kernels fall back to the slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_trunc_slow
CUDA: foreach_tensor_trunc_cuda
- func: _foreach_trunc_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to the slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_trunc_slow_
CUDA: foreach_tensor_trunc_cuda_
autogen: _foreach_trunc.out
- func: _foreach_zero_(Tensor(a!)[] self) -> ()
device_check: NoCheck # foreach kernels fall back to the slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_zero_slow_
CUDA: foreach_tensor_zero_cuda_
autogen: _foreach_zero, _foreach_zero.out
- func: _foreach_copy_(Tensor(a!)[] self, Tensor[] src, bool non_blocking=False) -> ()
device_check: NoCheck # foreach kernels fall back to the slow path when tensors are on different devices
variants: function
dispatch:
CPU: foreach_tensor_copy_list_kernel_slow_
CUDA: foreach_tensor_copy_list_kernel_cuda_
autogen: _foreach_copy, _foreach_copy.out
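# The _foreach_* ops above apply an elementwise op to every tensor in a
# TensorList at once (with fused CUDA kernels when the fast path applies).
# A quick illustrative sketch, assuming the usual torch._foreach_* Python
# bindings:
#
#   import torch
#   xs = [torch.tensor([1., 4.]), torch.tensor([9.])]
#   ys = torch._foreach_sqrt(xs)   # [tensor([1., 2.]), tensor([3.])]
#   torch._foreach_sqrt_(xs)       # in-place variant mutates each tensor in xs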
- func: bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
dispatch:
CPU: bucketize_cpu
CUDA: bucketize_cuda
MPS: bucketize_mps
- func: bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU: bucketize_out_cpu
CUDA: bucketize_out_cuda
MPS: bucketize_out_mps
- func: bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
dispatch:
CPU: bucketize_cpu
CUDA: bucketize_cuda
MPS: bucketize_mps
autogen: bucketize.Scalar_out
- func: searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
dispatch:
CPU: searchsorted_cpu
CUDA: searchsorted_cuda
MPS: searchsorted_mps
- func: searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU: searchsorted_out_cpu
CUDA: searchsorted_out_cuda
MPS: searchsorted_out_mps
- func: searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
dispatch:
CPU: searchsorted_cpu
CUDA: searchsorted_cuda
MPS: searchsorted_mps
- func: searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU: searchsorted_out_cpu
CUDA: searchsorted_out_cuda
MPS: searchsorted_out_mps
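# Illustration of the shared `right` flag (an informal sketch using the public
# torch.bucketize / torch.searchsorted bindings; right=False picks the first
# index i with boundaries[i-1] < v <= boundaries[i]):
#
#   import torch
#   boundaries = torch.tensor([1, 3, 5, 7, 9])
#   v = torch.tensor([3, 6, 9])
#   torch.bucketize(v, boundaries)              # tensor([1, 3, 4])
#   torch.bucketize(v, boundaries, right=True)  # tensor([2, 3, 5])
#   torch.searchsorted(boundaries, v)           # tensor([1, 3, 4])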
- func: _convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor
structured_delegate: _convert_indices_from_coo_to_csr.out
- func: _convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU: _convert_indices_from_coo_to_csr_structured_cpu
CUDA: _convert_indices_from_coo_to_csr_structured_cuda
- func: _convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor
structured_delegate: _convert_indices_from_csr_to_coo.out
- func: _convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!)
structured: True
dispatch:
CPU: _convert_indices_from_csr_to_coo_structured_cpu
CUDA: _convert_indices_from_csr_to_coo_structured_cuda
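# Rough sketch of the COO -> CSR index conversion above (an internal helper for
# sparse layouts; spelled via torch.ops.aten here purely for illustration):
#
#   import torch
#   row = torch.tensor([0, 0, 1, 2])   # sorted COO row indices, 3 rows
#   crow = torch.ops.aten._convert_indices_from_coo_to_csr(row, 3)
#   # crow == tensor([0, 2, 3, 4]): cumulative count of nonzeros per row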
## NN wrappers
- func: mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
python_module: nn
dispatch:
CPU, CUDA: mse_loss_out
MPS: mse_loss_out_mps
- func: mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: mse_loss.out
python_module: nn
- func: mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU, CUDA: mse_loss_backward_out
MPS: mse_loss_backward_out_mps
- func: mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
python_module: nn
dispatch:
CPU, CUDA: mse_loss_backward
MPS: mse_loss_backward_mps
- func: l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
python_module: nn
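# `int reduction=Mean` above is the at::Reduction enum (None/Mean/Sum), exposed
# as the 'none' | 'mean' | 'sum' strings in Python. A small functional-API
# sketch, assuming the usual torch.nn.functional bindings:
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.tensor([1.0, 2.0]); y = torch.zeros(2)
#   F.mse_loss(x, y)                    # mean of squared errors -> 2.5
#   F.mse_loss(x, y, reduction='sum')   # 1.0 + 4.0 -> 5.0
#   F.l1_loss(x, y, reduction='none')   # tensor([1., 2.])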
- func: multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
dispatch:
CPU: multi_margin_loss_cpu_out
CUDA: multi_margin_loss_cuda_out
- func: multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor
python_module: nn
dispatch:
CPU: multi_margin_loss_cpu
CUDA: multi_margin_loss_cuda
- func: multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU: multi_margin_loss_cpu_backward_out
CUDA: multi_margin_loss_cuda_backward_out
- func: multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor
python_module: nn
dispatch:
CPU: multi_margin_loss_cpu_backward
CUDA: multi_margin_loss_cuda_backward
- func: multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
- func: multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
python_module: nn
- func: multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!))
python_module: nn
dispatch:
CPU: multilabel_margin_loss_forward_out_cpu
CUDA: multilabel_margin_loss_forward_out_cuda
- func: multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target)
python_module: nn
dispatch:
CPU: multilabel_margin_loss_forward_cpu
CUDA: multilabel_margin_loss_forward_cuda
- func: multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU: multilabel_margin_loss_backward_cpu_out
CUDA: multilabel_margin_loss_backward_cuda_out
- func: multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor
python_module: nn
dispatch:
CPU: multilabel_margin_loss_backward_cpu
CUDA: multilabel_margin_loss_backward_cuda
- func: nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
- func: nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
python_module: nn
dispatch:
CompositeImplicitAutograd: nll_loss_nd_symint
- func: nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
python_module: nn
dispatch:
CompositeImplicitAutograd: nll_loss_symint
- func: nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
python_module: nn
structured: True
dispatch:
CPU: nll_loss_forward_out_cpu
CUDA: nll_loss_forward_out_cuda
MPS: nll_loss_forward_out_mps
- func: nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
python_module: nn
structured_delegate: nll_loss_forward.output
- func: nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: nll_loss_backward_out_cpu
CUDA: nll_loss_backward_out_cuda
MPS: nll_loss_backward_out_mps
- func: nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
python_module: nn
structured_delegate: nll_loss_backward.grad_input
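# nll_loss consumes log-probabilities and integer class targets; entries equal
# to ignore_index are skipped, and total_weight records the summed weight that
# was actually reduced over. A short sketch with the functional API:
#
#   import torch
#   import torch.nn.functional as F
#   logits = torch.randn(4, 10)
#   target = torch.tensor([1, 5, 9, 0])
#   loss = F.nll_loss(F.log_softmax(logits, dim=1), target)
#   # equivalent to F.cross_entropy(logits, target)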
- func: nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
- func: nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
python_module: nn
dispatch:
CompositeImplicitAutograd: nll_loss2d_symint
- func: nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
python_module: nn
dispatch:
CPU: nll_loss2d_forward_out_cpu
CUDA: nll_loss2d_forward_out_cuda
MPS: nll_loss2d_forward_out_mps
- func: nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
python_module: nn
dispatch:
CPU: nll_loss2d_forward_cpu
CUDA: nll_loss2d_forward_cuda
MPS: nll_loss2d_forward_mps
- func: nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU: nll_loss2d_backward_out_cpu
CUDA: nll_loss2d_backward_out_cuda
MPS: nll_loss2d_backward_out_mps
- func: nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
python_module: nn
dispatch:
CPU: nll_loss2d_backward_cpu
CUDA: nll_loss2d_backward_cuda
MPS: nll_loss2d_backward_mps
- func: smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
python_module: nn
dispatch:
CPU, CUDA: smooth_l1_loss_out
MPS: smooth_l1_loss_out_mps
- func: smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor
device_check: NoCheck # TensorIterator
structured_delegate: smooth_l1_loss.out
python_module: nn
- func: smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU: smooth_l1_loss_backward_out
CUDA: smooth_l1_loss_backward_out
MPS: smooth_l1_loss_backward_out_mps
- func: smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: smooth_l1_loss_backward
- func: huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
dispatch:
CPU, CUDA: huber_loss_out
MPS: huber_loss_out_mps
- func: huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor
python_module: nn
dispatch:
CPU, CUDA: huber_loss
MPS: huber_loss_mps
- func: huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU, CUDA: huber_loss_backward_out
MPS: huber_loss_backward_out_mps
- func: huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: huber_loss_backward
- func: soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
dispatch:
CompositeExplicitAutograd: soft_margin_loss_out
- func: soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: soft_margin_loss
- func: soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CompositeExplicitAutograd: soft_margin_loss_backward_out
- func: soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: soft_margin_loss_backward
- func: elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
CPU, CUDA: elu_out
MPS: elu_out_mps
- func: elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor
structured_delegate: elu.out
device_check: NoCheck # TensorIterator
python_module: nn
- func: elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
python_module: nn
dispatch:
CPU, CUDA: elu_backward_out
MPS: elu_backward_out_mps
- func: elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor
structured_delegate: elu_backward.grad_input
python_module: nn
- func: elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)
structured_delegate: elu.out
device_check: NoCheck # TensorIterator
python_module: nn
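# elu here is the generalized form shared by ELU and SELU:
# out = scale * (x if x > 0 else alpha * (exp(x * input_scale) - 1)).
# A small sketch, assuming the usual functional bindings:
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.tensor([-1.0, 2.0])
#   F.elu(x)    # alpha=1, scale=1 -> tensor([-0.6321, 2.0000])
#   F.selu(x)   # same kernel with the fixed SELU alpha/scale constants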
- func: glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
python_module: nn
dispatch:
CPU, CUDA: glu_out
MPS: glu_out_mps
- func: glu(Tensor self, int dim=-1) -> Tensor
structured_delegate: glu.out
device_check: NoCheck # TensorIterator
python_module: nn
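# glu splits the input in half along `dim` and returns a * sigmoid(b):
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.randn(4, 6)
#   y = F.glu(x, dim=1)   # shape (4, 3); equals x[:, :3] * x[:, 3:].sigmoid()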
- func: glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU: glu_backward_cpu_out
CUDA: glu_backward_cuda_out
MPS: glu_backward_mps_out
- func: glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor
python_module: nn
dispatch:
CPU: glu_backward_cpu
CUDA: glu_backward_cuda
MPS: glu_backward_mps
- func: glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor
python_module: nn
dispatch:
CPU, CUDA: glu_jvp
autogen: glu_jvp.out
- func: glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor
python_module: nn
dispatch:
CPU, CUDA: glu_backward_jvp
autogen: glu_backward_jvp.out
- func: hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
CPU, CUDA: hardsigmoid_out
MPS: hardsigmoid_out_mps
QuantizedCPU: hardsigmoid_out_quantized_cpu
- func: hardsigmoid(Tensor self) -> Tensor
structured_delegate: hardsigmoid.out
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
QuantizedCPU: hardsigmoid_quantized_cpu
- func: hardsigmoid_(Tensor(a!) self) -> Tensor(a!)
structured_delegate: hardsigmoid.out
device_check: NoCheck # TensorIterator
python_module: nn
- func: hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
python_module: nn
dispatch:
CPU, CUDA: hardsigmoid_backward_out
MPS: hardsigmoid_backward_out_mps
- func: hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor
structured_delegate: hardsigmoid_backward.grad_input
python_module: nn
- func: hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
CPU, CUDA, MPS: hardtanh_out
QuantizedCPU: hardtanh_out_quantized_cpu
- func: hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
CPU, CUDA, MPS: hardtanh
QuantizedCPU: hardtanh_quantized_cpu
tags: core
- func: hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU, CUDA: hardtanh_backward_out
MPS: hardtanh_backward_out_mps
- func: hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor
python_module: nn
dispatch:
CPU, CUDA: hardtanh_backward
MPS: hardtanh_backward_mps
- func: hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
CPU, CUDA, MPS: hardtanh_
QuantizedCPU: hardtanh_quantized_cpu_
- func: hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
CPU, CUDA: hardswish_out
MPS: hardswish_out_mps
- func: hardswish(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
CPU, CUDA: hardswish
MPS: hardswish_mps
- func: hardswish_(Tensor(a!) self) -> Tensor(a!)
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
CPU, CUDA: hardswish_
MPS: hardswish_mps_
- func: hardswish_backward(Tensor grad_output, Tensor self) -> Tensor
python_module: nn
dispatch:
CPU, CUDA: hardswish_backward
MPS: hardswish_backward_mps
autogen: hardswish_backward.out
- func: leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
CPU, CUDA: leaky_relu_out
MPS: leaky_relu_out_mps
QuantizedCPU: leaky_relu_out_quantized_cpu
- func: leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor
structured_delegate: leaky_relu.out
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
QuantizedCPU: leaky_relu_quantized_cpu
tags: core
- func: leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
python_module: nn
dispatch:
CPU, CUDA: leaky_relu_backward_out
MPS: leaky_relu_backward_out_mps
- func: leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor
structured_delegate: leaky_relu_backward.grad_input
python_module: nn
- func: leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)
structured_delegate: leaky_relu.out
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
QuantizedCPU: leaky_relu_quantized_cpu_
- func: log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
python_module: nn
- func: log_sigmoid(Tensor self) -> Tensor
device_check: NoCheck # TensorIterator
python_module: nn
- func: log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
CPU: log_sigmoid_forward_out_cpu
CUDA: log_sigmoid_forward_out_cuda
MPS: log_sigmoid_forward_out_mps
- func: log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
CPU: log_sigmoid_forward_cpu
CUDA: log_sigmoid_forward_cuda
MPS: log_sigmoid_forward_mps
- func: log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU: log_sigmoid_backward_cpu_out
CUDA: log_sigmoid_backward_cuda_out
MPS: log_sigmoid_backward_mps_out
- func: log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor
python_module: nn
dispatch:
CPU: log_sigmoid_backward_cpu
CUDA: log_sigmoid_backward_cuda
MPS: log_sigmoid_backward_mps
- func: rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
tags: nondeterministic_seeded
dispatch:
CPU: rrelu_with_noise_out_cpu
CUDA: rrelu_with_noise_out_cuda
- func: rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
python_module: nn
dispatch:
CPU: rrelu_with_noise_cpu
CUDA: rrelu_with_noise_cuda
tags: nondeterministic_seeded
- func: rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: rrelu_with_noise_backward
autogen: rrelu_with_noise_backward.out
- func: rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
python_module: nn
tags: nondeterministic_seeded
dispatch:
CPU: rrelu_with_noise_cpu_
CUDA: rrelu_with_noise_cuda_
- func: softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
CPU, CUDA: softplus_out
MPS: softplus_out_mps
- func: softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor
structured_delegate: softplus.out
device_check: NoCheck # TensorIterator
python_module: nn
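# softplus(x) = (1/beta) * log(1 + exp(beta * x)); once beta * x exceeds
# `threshold` the op returns x directly to avoid overflow. A quick sketch:
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.tensor([0.0, 100.0])
#   F.softplus(x)            # tensor([0.6931, 100.0000]) -- log(2), then the linear regime
#   F.softplus(x, beta=2.0)  # (1/2) * log(1 + exp(2 * x)) below the threshold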
- func: softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
python_module: nn
dispatch:
CPU, CUDA: softplus_backward_out
MPS: softplus_backward_out_mps
- func: softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor
structured_delegate: softplus_backward.grad_input
python_module: nn
- func: softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
device_check: NoCheck # TensorIterator
python_module: nn
dispatch:
CPU, CUDA: softshrink_out
MPS: softshrink_out_mps
- func: softshrink(Tensor self, Scalar lambd=0.5) -> Tensor
structured_delegate: softshrink.out
device_check: NoCheck # TensorIterator
python_module: nn
- func: softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
python_module: nn
dispatch:
CPU, CUDA: softshrink_backward_out
MPS: softshrink_backward_out_mps
- func: softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor
structured_delegate: softshrink_backward.grad_input
python_module: nn
- func: adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
dispatch:
CPU: adaptive_avg_pool2d_out_cpu
CUDA: adaptive_avg_pool2d_out_cuda
MPS: adaptive_avg_pool2d_out_mps
MkldnnCPU: mkldnn_adaptive_avg_pool2d_out_stub
- func: adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
python_module: nn
dispatch:
CompositeImplicitAutograd: adaptive_avg_pool2d_symint
- func: mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
dispatch:
MkldnnCPU: mkldnn_adaptive_avg_pool2d
- func: mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
MkldnnCPU: mkldnn_adaptive_avg_pool2d_out
- func: mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
dispatch:
MkldnnCPU: mkldnn_adaptive_avg_pool2d_backward
autogen: mkldnn_adaptive_avg_pool2d_backward.out
- func: _adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
dispatch:
CPU: adaptive_avg_pool2d_cpu
CUDA: adaptive_avg_pool2d_cuda
MPS: adaptive_avg_pool2d_mps
QuantizedCPU: adaptive_avg_pool2d_quantized_cpu
QuantizedCUDA: adaptive_avg_pool2d_quantized_cuda
autogen: _adaptive_avg_pool2d.out
tags: core
- func: _adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
python_module: nn
dispatch:
CPU: adaptive_avg_pool2d_backward_cpu
CUDA: adaptive_avg_pool2d_backward_cuda
MPS: adaptive_avg_pool2d_backward_mps
autogen: _adaptive_avg_pool2d_backward.out
tags: core
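# adaptive_avg_pool2d chooses the pooling windows so the output has exactly
# output_size spatial dimensions, whatever the input size. A small sketch:
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.randn(1, 3, 17, 29)
#   F.adaptive_avg_pool2d(x, (5, 5)).shape   # torch.Size([1, 3, 5, 5])
#   F.adaptive_avg_pool2d(x, 1).shape        # global average pool: (1, 3, 1, 1)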
- func: adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
dispatch:
CPU: adaptive_avg_pool3d_out_cpu
CUDA: adaptive_avg_pool3d_out_cuda
QuantizedCPU: adaptive_avg_pool3d_out_quantized_cpu
- func: adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
python_module: nn
dispatch:
CompositeImplicitAutograd: adaptive_avg_pool3d_symint
- func: _adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
dispatch:
CPU: adaptive_avg_pool3d_cpu
CUDA: adaptive_avg_pool3d_cuda
QuantizedCPU: adaptive_avg_pool3d_quantized_cpu
autogen: _adaptive_avg_pool3d.out
tags: core
- func: adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU: adaptive_avg_pool3d_backward_out_cpu
CUDA: adaptive_avg_pool3d_backward_out_cuda
- func: _adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor
python_module: nn
dispatch:
CPU: adaptive_avg_pool3d_backward_cpu
CUDA: adaptive_avg_pool3d_backward_cuda
autogen: _adaptive_avg_pool3d_backward.out
# Return: (Tensor output, Tensor indices)
- func: adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
python_module: nn
structured: True
dispatch:
CPU: adaptive_max_pool2d_out_cpu
CUDA: adaptive_max_pool2d_out_cuda
MPS: adaptive_max_pool2d_out_mps
# Return: (Tensor output, Tensor indices)
- func: adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)
python_module: nn
structured_delegate: adaptive_max_pool2d.out
- func: adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: adaptive_max_pool2d_backward_out_cpu
CUDA: adaptive_max_pool2d_backward_out_cuda
MPS: adaptive_max_pool2d_backward_out_mps
- func: adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
python_module: nn
structured_delegate: adaptive_max_pool2d_backward.grad_input
# Return: (Tensor output, Tensor indices)
- func: adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
python_module: nn
structured: True
dispatch:
CPU: adaptive_max_pool3d_out_cpu
CUDA: adaptive_max_pool3d_out_cuda
# Return: (Tensor output, Tensor indices)
- func: adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)
python_module: nn
structured_delegate: adaptive_max_pool3d.out
- func: adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: adaptive_max_pool3d_backward_out_cpu
CUDA: adaptive_max_pool3d_backward_out_cuda
- func: adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
python_module: nn
structured_delegate: adaptive_max_pool3d_backward.grad_input
- func: avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
precomputed:
- kernel_size -> int kH, int kW
- stride -> int dH, int dW
- padding -> int padH, int padW
dispatch:
CPU: avg_pool2d_out_cpu
CUDA: avg_pool2d_out_cuda
MPS: avg_pool2d_out_mps
MkldnnCPU: mkldnn_avg_pool2d_out
- func: avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
python_module: nn
structured_delegate: avg_pool2d.out
dispatch:
MkldnnCPU: mkldnn_avg_pool2d
QuantizedCPU: avg_pool2d_quantized_cpu
tags: core
- func: avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: avg_pool2d_backward_out_cpu
CUDA: avg_pool2d_backward_out_cuda
MPS: avg_pool2d_backward_out_mps
MkldnnCPU: mkldnn_avg_pool2d_backward_out
- func: avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
python_module: nn
structured_delegate: avg_pool2d_backward.grad_input
dispatch:
MkldnnCPU: mkldnn_avg_pool2d_backward
tags: core
- func: avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: avg_pool3d_out_cpu
CUDA: avg_pool3d_out_cuda
MkldnnCPU: mkldnn_avg_pool3d_out
- func: avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
python_module: nn
structured_delegate: avg_pool3d.out
dispatch:
MkldnnCPU: mkldnn_avg_pool3d
QuantizedCPU: avg_pool3d_quantized_cpu
tags: core
- func: avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: avg_pool3d_backward_out_cpu
CUDA: avg_pool3d_backward_out_cuda
MkldnnCPU: mkldnn_avg_pool3d_backward_out
- func: avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
python_module: nn
structured_delegate: avg_pool3d_backward.grad_input
dispatch:
MkldnnCPU: mkldnn_avg_pool3d_backward
# Return: (Tensor output, Tensor indices)
- func: fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
python_module: nn
structured: True
dispatch:
CPU: fractional_max_pool2d_out_cpu
CUDA: fractional_max_pool2d_out_cuda
# Return: (Tensor output, Tensor indices)
- func: fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)
python_module: nn
structured_delegate: fractional_max_pool2d.output
- func: fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: fractional_max_pool2d_backward_cpu
CUDA: fractional_max_pool2d_backward_cuda
- func: fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor
python_module: nn
structured_delegate: fractional_max_pool2d_backward.grad_input
# Return: (Tensor output, Tensor indices)
- func: fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
python_module: nn
structured: True
precomputed:
- kernel_size -> int poolSizeT, int poolSizeH, int poolSizeW
- output_size -> int outputT, int outputH, int outputW
- int numBatch, int numPlanes, int inputT, int inputH, int inputW
dispatch:
CPU: fractional_max_pool3d_out_cpu
CUDA: fractional_max_pool3d_out_cuda
# Return: (Tensor output, Tensor indices)
- func: fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)
python_module: nn
structured_delegate: fractional_max_pool3d.output
- func: fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU: fractional_max_pool3d_backward_out_cpu
CUDA: fractional_max_pool3d_backward_out_cuda
- func: fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor
python_module: nn
dispatch:
CPU: fractional_max_pool3d_backward_cpu
CUDA: fractional_max_pool3d_backward_cuda
# Return: (Tensor output, Tensor indices)
- func: max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
python_module: nn
structured: True
dispatch:
CPU: max_pool2d_with_indices_out_cpu
CUDA: max_pool2d_with_indices_out_cuda
MPS: max_pool2d_with_indices_out_mps
# Return: (Tensor output, Tensor indices)
- func: max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
python_module: nn
structured_delegate: max_pool2d_with_indices.out
tags: core
- func: max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: max_pool2d_with_indices_backward_out_cpu
CUDA: max_pool2d_with_indices_backward_out_cuda
MPS: max_pool2d_with_indices_backward_out_mps
- func: max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor
python_module: nn
structured_delegate: max_pool2d_with_indices_backward.grad_input
tags: core
# Return: (Tensor output, Tensor indices)
- func: max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
python_module: nn
dispatch:
CPU: max_pool3d_with_indices_out_cpu
CUDA: max_pool3d_with_indices_out_cuda
# Return: (Tensor output, Tensor indices)
- func: max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
python_module: nn
dispatch:
CPU: max_pool3d_with_indices_cpu
CUDA: max_pool3d_with_indices_cuda
tags: core
- func: max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU: max_pool3d_with_indices_backward_out_cpu
CUDA: max_pool3d_with_indices_backward_out_cuda
- func: max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor
python_module: nn
dispatch:
CPU: max_pool3d_with_indices_backward_cpu
CUDA: max_pool3d_with_indices_backward_cuda
- func: max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
dispatch:
CPU: max_unpooling2d_forward_out_cpu
CUDA: max_unpooling2d_forward_out_cuda
- func: max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor
python_module: nn
dispatch:
CPU: max_unpooling2d_forward_cpu
CUDA: max_unpooling2d_forward_cuda
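# max_unpool2d reverses max_pool2d_with_indices by scattering the pooled values
# back to the positions recorded in `indices` (all other entries are zero).
# A short sketch:
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.randn(1, 1, 4, 4)
#   pooled, idx = F.max_pool2d(x, kernel_size=2, return_indices=True)
#   restored = F.max_unpool2d(pooled, idx, kernel_size=2)  # shape (1, 1, 4, 4)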
- func: max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
dispatch:
CPU: max_unpooling3d_forward_out_cpu
CUDA: max_unpooling3d_forward_out_cuda
- func: max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor
python_module: nn
dispatch:
CPU: max_unpooling3d_forward_cpu
CUDA: max_unpooling3d_forward_cuda
- func: reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: reflection_pad1d_out_cpu
QuantizedCPU: reflection_pad1d_out_quantized_cpu
CUDA: reflection_pad1d_out_cuda
MPS: reflection_pad1d_out_mps
- func: reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor
python_module: nn
structured_delegate: reflection_pad1d.out
tags: core
- func: reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: reflection_pad1d_backward_out_cpu
CUDA: reflection_pad1d_backward_out_cuda
MPS: reflection_pad1d_backward_out_mps
- func: reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
python_module: nn
structured_delegate: reflection_pad1d_backward.grad_input
- func: reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
dispatch:
CPU, QuantizedCPU: reflection_pad2d_out_cpu
CUDA: reflection_pad2d_out_cuda
MPS: reflection_pad2d_out_mps
- func: reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor
python_module: nn
dispatch:
CPU: reflection_pad2d_cpu
QuantizedCPU: reflection_pad2d_quantized_cpu
CUDA: reflection_pad2d_cuda
MPS: reflection_pad2d_mps
tags: core
- func: reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU: reflection_pad2d_backward_out_cpu
CUDA: reflection_pad2d_backward_out_cuda
MPS: reflection_pad2d_backward_out_mps
- func: reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
python_module: nn
dispatch:
CPU: reflection_pad2d_backward_cpu
CUDA: reflection_pad2d_backward_cuda
MPS: reflection_pad2d_backward_mps
- func: reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: reflection_pad3d_out_cpu
CUDA: reflection_pad3d_out_cuda
MPS: reflection_pad3d_out_mps
- func: reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor
python_module: nn
structured_delegate: reflection_pad3d.out
tags: core
- func: reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: reflection_pad3d_backward_out_cpu
CUDA: reflection_pad3d_backward_out_cuda
MPS: reflection_pad3d_backward_out_mps
- func: reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
python_module: nn
structured_delegate: reflection_pad3d_backward.grad_input
- func: replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: replication_pad1d_out_cpu
CUDA: replication_pad1d_out_cuda
MPS: replication_pad1d_out_mps
- func: replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor
python_module: nn
structured_delegate: replication_pad1d.out
- func: replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: replication_pad1d_backward_out_cpu
CUDA: replication_pad1d_backward_out_cuda
MPS: replication_pad1d_backward_out_mps
- func: replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
python_module: nn
structured_delegate: replication_pad1d_backward.grad_input
- func: replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: replication_pad2d_out_cpu
CUDA: replication_pad2d_out_cuda
MPS: replication_pad2d_out_mps
- func: replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor
python_module: nn
structured_delegate: replication_pad2d.out
tags: core
- func: replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU: replication_pad2d_backward_out_cpu
CUDA: replication_pad2d_backward_out_cuda
MPS: replication_pad2d_backward_out_mps
- func: replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
python_module: nn
dispatch:
CPU: replication_pad2d_backward_cpu
CUDA: replication_pad2d_backward_cuda
MPS: replication_pad2d_backward_mps
- func: replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: replication_pad3d_out_cpu
CUDA: replication_pad3d_out_cuda
MPS: replication_pad3d_out_mps
- func: replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor
python_module: nn
structured_delegate: replication_pad3d.out
tags: core
- func: replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU: replication_pad3d_backward_out_cpu
CUDA: replication_pad3d_backward_out_cuda
MPS: replication_pad3d_backward_out_mps
- func: replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
python_module: nn
dispatch:
CPU: replication_pad3d_backward_cpu
CUDA: replication_pad3d_backward_cuda
MPS: replication_pad3d_backward_mps
- func: _pad_circular(Tensor self, SymInt[] pad) -> Tensor
python_module: nn
dispatch:
CompositeImplicitAutograd: _pad_circular_symint
- func: _pad_enum(Tensor self, SymInt[] pad, int mode, float? value=None) -> Tensor
python_module: nn
dispatch:
CompositeImplicitAutograd: _pad_enum_symint
- func: pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor
python_module: nn
dispatch:
CompositeImplicitAutograd: pad_symint
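# pad takes a flat (left, right, top, bottom, ...) list, last dimension first,
# and dispatches to constant_pad_nd or to the reflection/replication/circular
# padding ops depending on `mode`. A short sketch:
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.arange(4.0).reshape(1, 1, 4)
#   F.pad(x, (1, 2))                  # constant (zero) padding -> length 7
#   F.pad(x, (1, 1), mode='reflect')  # goes through the reflection_pad1d kernel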
- func: upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
autogen: upsample_linear1d.vec_out
- func: upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
autogen: upsample_bilinear2d.vec_out
tags: core
- func: _upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
autogen: _upsample_bilinear2d_aa.vec_out
- func: upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
autogen: upsample_trilinear3d.vec_out
- func: upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
autogen: upsample_bicubic2d.vec_out
- func: _upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
python_module: nn
autogen: _upsample_bicubic2d_aa.vec_out
- func: upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
python_module: nn
autogen: upsample_nearest1d.vec_out
- func: _upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
python_module: nn
autogen: _upsample_nearest_exact1d.vec_out
- func: upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
python_module: nn
autogen: upsample_nearest2d.vec_out
tags: core
- func: _upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
python_module: nn
autogen: _upsample_nearest_exact2d.vec_out
- func: upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
python_module: nn
autogen: upsample_nearest3d.vec_out
- func: _upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
python_module: nn
autogen: _upsample_nearest_exact3d.vec_out
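# The .vec overloads above accept either output_size or scale_factors (the one
# not used is None), which is roughly how torch.nn.functional.interpolate
# forwards its arguments. A small sketch:
#
#   import torch
#   import torch.nn.functional as F
#   x = torch.randn(1, 3, 8, 8)
#   F.interpolate(x, size=(16, 16), mode='bilinear', align_corners=False)
#   F.interpolate(x, scale_factor=2.0, mode='nearest')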
# NOTE: all of the non-"vec" upsample overloads are only kept for backward compatibility.
- func: upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_linear1d_out_cpu
CUDA: upsample_linear1d_out_cuda
MPS: upsample_linear1d_out_mps
- func: upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor
python_module: nn
structured_delegate: upsample_linear1d.out
- func: upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_linear1d_backward_out_cpu
CUDA: upsample_linear1d_backward_out_cuda
MPS: upsample_linear1d_backward_out_mps
- func: upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
python_module: nn
structured_delegate: upsample_linear1d_backward.grad_input
- func: upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_bilinear2d_out_cpu
CUDA: upsample_bilinear2d_out_cuda
MPS: upsample_bilinear2d_out_mps
- func: upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: upsample_bilinear2d.out
dispatch:
QuantizedCPU: upsample_bilinear2d_quantized_cpu
- func: upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_bilinear2d_backward_out_cpu
CUDA: upsample_bilinear2d_backward_out_cuda
MPS: upsample_bilinear2d_backward_out_mps
- func: upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: upsample_bilinear2d_backward.grad_input
- func: _upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: _upsample_bilinear2d_aa_out_cpu
CUDA: _upsample_bilinear2d_aa_out_cuda
- func: _upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: _upsample_bilinear2d_aa.out
- func: _upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: _upsample_bilinear2d_aa_backward_out_cpu
CUDA: _upsample_bilinear2d_aa_backward_out_cuda
- func: _upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: _upsample_bilinear2d_aa_backward.grad_input
- func: upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_bicubic2d_out_cpu
CUDA: upsample_bicubic2d_out_cuda
- func: upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: upsample_bicubic2d.out
- func: upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_bicubic2d_backward_out_cpu
CUDA: upsample_bicubic2d_backward_out_cuda
- func: upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: upsample_bicubic2d_backward.grad_input
- func: _upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: _upsample_bicubic2d_aa_out_cpu
CUDA: _upsample_bicubic2d_aa_out_cuda
- func: _upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: _upsample_bicubic2d_aa.out
- func: _upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: _upsample_bicubic2d_aa_backward_out_cpu
CUDA: _upsample_bicubic2d_aa_backward_out_cuda
- func: _upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: _upsample_bicubic2d_aa_backward.grad_input
- func: upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_trilinear3d_out_cpu
CUDA: upsample_trilinear3d_out_cuda
- func: upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: upsample_trilinear3d.out
- func: upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_trilinear3d_backward_out_cpu
CUDA: upsample_trilinear3d_backward_out_cuda
- func: upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: upsample_trilinear3d_backward.grad_input
- func: upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_nearest1d_out_cpu
CUDA: upsample_nearest1d_out_cuda
MPS: upsample_nearest1d_out_mps
- func: _upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: _upsample_nearest_exact1d_out_cpu
CUDA: _upsample_nearest_exact1d_out_cuda
MPS: _upsample_nearest_exact1d_out_mps
- func: upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
python_module: nn
structured_delegate: upsample_nearest1d.out
- func: _upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
python_module: nn
structured_delegate: _upsample_nearest_exact1d.out
- func: upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_nearest1d_backward_out_cpu
CUDA: upsample_nearest1d_backward_out_cuda
MPS: upsample_nearest1d_backward_out_mps
- func: _upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: _upsample_nearest_exact1d_backward_out_cpu
CUDA: _upsample_nearest_exact1d_backward_out_cuda
MPS: _upsample_nearest_exact1d_backward_out_mps
- func: upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
python_module: nn
structured_delegate: upsample_nearest1d_backward.grad_input
- func: _upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
python_module: nn
structured_delegate: _upsample_nearest_exact1d_backward.grad_input
- func: upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_nearest2d_out_cpu
CUDA: upsample_nearest2d_out_cuda
MPS: upsample_nearest2d_out_mps
- func: _upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: _upsample_nearest_exact2d_out_cpu
CUDA: _upsample_nearest_exact2d_out_cuda
MPS: _upsample_nearest_exact2d_out_mps
- func: upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: upsample_nearest2d.out
dispatch:
QuantizedCPU: upsample_nearest2d_quantized_cpu
- func: _upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: _upsample_nearest_exact2d.out
dispatch:
QuantizedCPU: _upsample_nearest_exact2d_quantized_cpu
- func: upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_nearest2d_backward_out_cpu
CUDA: upsample_nearest2d_backward_out_cuda
MPS: upsample_nearest2d_backward_out_mps
- func: _upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: _upsample_nearest_exact2d_backward_out_cpu
CUDA: _upsample_nearest_exact2d_backward_out_cuda
MPS: _upsample_nearest_exact2d_backward_out_mps
- func: upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: upsample_nearest2d_backward.grad_input
- func: _upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: _upsample_nearest_exact2d_backward.grad_input
- func: upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_nearest3d_out_cpu
CUDA: upsample_nearest3d_out_cuda
- func: _upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: _upsample_nearest_exact3d_out_cpu
CUDA: _upsample_nearest_exact3d_out_cuda
- func: upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: upsample_nearest3d.out
dispatch:
QuantizedCPU: upsample_nearest3d_quantized_cpu
- func: _upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: _upsample_nearest_exact3d.out
dispatch:
QuantizedCPU: _upsample_nearest_exact3d_quantized_cpu
- func: upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: upsample_nearest3d_backward_out_cpu
CUDA: upsample_nearest3d_backward_out_cuda
- func: _upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: _upsample_nearest_exact3d_backward_out_cpu
CUDA: _upsample_nearest_exact3d_backward_out_cuda
- func: upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: upsample_nearest3d_backward.grad_input
- func: _upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
python_module: nn
structured_delegate: _upsample_nearest_exact3d_backward.grad_input
- func: sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: sigmoid_backward_out
MPS: sigmoid_backward_out_mps
tags: pointwise
- func: sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor
python_module: nn
structured_delegate: sigmoid_backward.grad_input
tags: pointwise
- func: logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: logit_backward_out
MPS: logit_backward_out_mps
tags: pointwise
- func: logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor
python_module: nn
structured_delegate: logit_backward.grad_input
tags: pointwise
- func: tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: tanh_backward_out
MPS: tanh_backward_out_mps
tags: pointwise
- func: tanh_backward(Tensor grad_output, Tensor output) -> Tensor
python_module: nn
structured_delegate: tanh_backward.grad_input
tags: pointwise
# What's a thnn_conv_ versus a slow_conv_?
#
# Historically, we have inefficient implementations of convolutions
# coming from the THNN/THCUNN library. These convolutions typically
# operated by computing the Toeplitz matrix and then doing a matrix
# multiply with the input; this is very memory inefficient! However,
# occasionally, we really don't have anything better, so it's helpful
# to have these fallbacks when there is no more optimized implementation
# in cudnn or mkldnn, etc. Both thnn_ and slow_ convolutions fall
# into this bucket.
#
# The difference between these two designations is that thnn_ refers
# to a convolution that is still written in the "legacy" style; that is,
# C code in the THNN/ or THCUNN/ directory. A slow_ convolution is
# one that is written in the native style: modern C++. Algorithmically,
# these are the same thing, but we give them different prefixes to
# make the operational distinction clear.
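#
# As a rough illustration (not the actual kernel code), the Toeplitz/im2col
# approach described above can be sketched in Python with `unfold` (im2col)
# followed by a matrix multiply; the unfolded buffer is much larger than the
# input, which is where the memory inefficiency comes from:
#
#   import torch
#   x = torch.randn(1, 3, 8, 8)                            # (N, C, H, W)
#   w = torch.randn(16, 3, 3, 3)                           # (out_C, C, kH, kW)
#   cols = torch.nn.functional.unfold(x, kernel_size=3)    # im2col: (1, 27, 36)
#   out = w.view(16, -1) @ cols                            # (1, 16, 36)
#   out = out.view(1, 16, 6, 6)                            # matches a 3x3 conv, stride 1, no padding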
- func: slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
structured: True
dispatch:
CPU: slow_conv_transpose2d_structured_cpu
CUDA: slow_conv_transpose2d_structured_cuda
- func: slow_conv_transpose2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1) -> Tensor
python_module: nn
structured_delegate: slow_conv_transpose2d.out
- func: slow_conv_transpose3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
dispatch:
CPU: slow_conv_transpose3d_out_cpu
CUDA: slow_conv_transpose3d_out_cuda
- func: slow_conv_transpose3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1) -> Tensor
python_module: nn
dispatch:
CPU: slow_conv_transpose3d_cpu
CUDA: slow_conv_transpose3d_cuda
- func: thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
- func: thnn_conv2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0) -> Tensor
python_module: nn
- func: _slow_conv2d_forward.output(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) output) -> Tensor(a!)
python_module: nn
dispatch:
CPU: slow_conv2d_forward_out_cpu
CUDA: slow_conv2d_forward_out_cuda
- func: _slow_conv2d_forward(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding) -> Tensor
python_module: nn
dispatch:
CPU: slow_conv2d_forward_cpu
CUDA: slow_conv2d_forward_cuda
- func: _slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
python_module: nn
dispatch:
CPU: slow_conv2d_backward_out_cpu
CUDA: slow_conv2d_backward_out_cuda
- func: _slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
python_module: nn
dispatch:
CPU: slow_conv2d_backward_cpu
CUDA: slow_conv2d_backward_cuda
autogen: _slow_conv2d_backward.output_mask_out
- func: _conv_depthwise2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
python_module: nn
dispatch:
CUDA: conv_depthwise2d_cuda_out
- func: _conv_depthwise2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation) -> Tensor
python_module: nn
dispatch:
CUDA: conv_depthwise2d_cuda
- func: conv_depthwise3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation) -> Tensor
python_module: nn
dispatch:
CUDA: conv_depthwise3d_cuda
autogen: conv_depthwise3d.out
- func: slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
- func: slow_conv3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0) -> Tensor
python_module: nn
- func: slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
python_module: nn
dispatch:
CPU: slow_conv3d_forward_out_cpu
- func: slow_conv3d_forward(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding) -> Tensor
python_module: nn
dispatch:
CPU: slow_conv3d_forward_cpu
- func: slow_conv_dilated2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1) -> Tensor
python_module: nn
dispatch:
CPU: slow_conv_dilated2d_cpu
CUDA: slow_conv_dilated2d_cuda
autogen: slow_conv_dilated2d.out
- func: slow_conv_dilated3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1) -> Tensor
python_module: nn
dispatch:
CPU: slow_conv_dilated3d_cpu
CUDA: slow_conv_dilated3d_cuda
autogen: slow_conv_dilated3d.out
- func: col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
dispatch:
CPU: col2im_out_cpu
CUDA: col2im_out_cuda
- func: col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
python_module: nn
dispatch:
CPU: col2im_cpu
CUDA: col2im_cuda
tags: core
- func: column_stack(Tensor[] tensors) -> Tensor
- func: column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
- func: im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
python_module: nn
dispatch:
CPU: im2col_out_cpu
CUDA: im2col_out_cuda
- func: im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
python_module: nn
dispatch:
CPU: im2col_cpu
CUDA: im2col_cuda
- func: isfinite(Tensor self) -> Tensor
variants: function, method
device_check: NoCheck
device_guard: False
- func: isinf(Tensor self) -> Tensor
variants: function, method
device_check: NoCheck
device_guard: False
dispatch:
CompositeExplicitAutograd: isinf
SparseCPU, SparseCUDA: isinf_sparse
SparseMeta: isinf_sparse_meta
SparseCsrCPU, SparseCsrCUDA: isinf_sparse_csr
autogen: isinf.out
tags: [core, pointwise]
- func: record_stream(Tensor(a!) self, Stream s) -> ()
variants: method
dispatch:
CUDA: record_stream_cuda
- func: isposinf(Tensor self) -> Tensor
variants: function, method
structured_delegate: isposinf.out
dispatch:
SparseCPU, SparseCUDA: isposinf_sparse
SparseCsrCPU, SparseCsrCUDA: isposinf_sparse_csr
tags: pointwise
- func: isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: isposinf_out
SparseCPU, SparseCUDA: isposinf_sparse_out
SparseCsrCPU, SparseCsrCUDA: isposinf_sparse_csr_out
tags: pointwise
- func: isneginf(Tensor self) -> Tensor
variants: function, method
structured_delegate: isneginf.out
dispatch:
SparseCPU, SparseCUDA: isneginf_sparse
SparseCsrCPU, SparseCsrCUDA: isneginf_sparse_csr
tags: pointwise
- func: isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: isneginf_out
SparseCPU, SparseCUDA: isneginf_sparse_out
SparseCsrCPU, SparseCsrCUDA: isneginf_sparse_csr_out
tags: pointwise
# NOTE [_add_batch_dim and _remove_batch_dim]
# _add_batch_dim and _remove_batch_dim are meant to be used in the implementation
# of the vmap frontend API (see torch/_vmap_internals.py). They are not
# user-facing, hence the leading underscore. Please don't use them anywhere else.
- func: _add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor
variants: function
# See NOTE [_add_batch_dim and _remove_batch_dim]
- func: _remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor
variants: function
## Functions related to the `torch.special` namespace
# Note [special namespace binding]
# Functions in the special python module should have their names start with
# "special_" underscore and be bound to the desired Python name in
# torch/special/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/special.h.
# The "special_" names should be hidden from the user and not documented.
- func: special_entr(Tensor self) -> Tensor
structured_delegate: special_entr.out
python_module: special
variants: function
tags: pointwise
- func: special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
python_module: special
variants: function
dispatch:
CPU, CUDA: special_entr_out
tags: pointwise
- func: special_ndtri(Tensor self) -> Tensor
structured_delegate: special_ndtri.out
python_module: special
variants: function
tags: pointwise
- func: special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
python_module: special
variants: function
dispatch:
CPU, CUDA: special_ndtri_out
tags: pointwise
- func: special_log_ndtr(Tensor self) -> Tensor
structured_delegate: special_log_ndtr.out
python_module: special
variants: function
tags: pointwise
- func: special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
structured: True
structured_inherits: TensorIteratorBase
python_module: special
variants: function
dispatch:
CPU, CUDA: special_log_ndtr_out
tags: pointwise
- func: special_expm1(Tensor self) -> Tensor
python_module: special
variants: function
- func: special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
variants: function
- func: special_exp2(Tensor self) -> Tensor
python_module: special
variants: function
- func: special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
variants: function
- func: special_psi(Tensor self) -> Tensor
python_module: special
variants: function
- func: special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
variants: function
- func: special_digamma(Tensor self) -> Tensor
python_module: special
variants: function
- func: special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
variants: function
- func: special_gammaln(Tensor self) -> Tensor
python_module: special
variants: function
- func: special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
variants: function
- func: special_erf(Tensor self) -> Tensor
python_module: special
variants: function
- func: special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
variants: function
- func: special_erfc(Tensor self) -> Tensor
python_module: special
variants: function
- func: special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
- func: special_erfcx(Tensor self) -> Tensor
python_module: special
variants: function
structured_delegate: special_erfcx.out
tags: pointwise
- func: special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: special_erfcx_out
tags: pointwise
- func: special_erfinv(Tensor self) -> Tensor
python_module: special
variants: function
- func: special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
- func: special_ndtr(Tensor self) -> Tensor
python_module: special
variants: function
- func: special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
variants: function
- func: special_xlog1py(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
python_module: special
variants: function
structured_delegate: special_xlog1py.out
tags: pointwise
- func: special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
python_module: special
variants: function
dispatch:
CompositeExplicitAutograd: special_xlog1py
tags: pointwise
- func: special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
python_module: special
variants: function
dispatch:
CompositeExplicitAutograd: special_xlog1py
tags: pointwise
- func: special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
python_module: special
variants: function
dispatch:
CPU, CUDA: special_xlog1py_out
tags: pointwise
- func: special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
python_module: special
variants: function
dispatch:
CompositeExplicitAutograd: special_xlog1py_out
tags: pointwise
- func: special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
python_module: special
variants: function
dispatch:
CompositeExplicitAutograd: special_xlog1py_out
tags: pointwise
- func: special_xlogy(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
python_module: special
variants: function
- func: special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
python_module: special
variants: function
- func: special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
python_module: special
variants: function
- func: special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
python_module: special
variants: function
- func: special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
python_module: special
variants: function
- func: special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
python_module: special
variants: function
- func: special_zeta(Tensor self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
python_module: special
variants: function
structured_delegate: special_zeta.out
tags: pointwise
- func: special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor
device_check: NoCheck # TensorIterator
python_module: special
variants: function
dispatch:
CompositeExplicitAutograd: special_zeta
tags: pointwise
- func: special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor
device_check: NoCheck # TensorIterator
python_module: special
variants: function
dispatch:
CompositeExplicitAutograd: special_zeta
tags: pointwise
- func: special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
structured: True
structured_inherits: TensorIteratorBase
python_module: special
variants: function
dispatch:
CPU, CUDA: special_zeta_out
tags: pointwise
- func: special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
python_module: special
variants: function
dispatch:
CompositeExplicitAutograd: special_zeta_out
tags: pointwise
- func: special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
python_module: special
variants: function
dispatch:
CompositeExplicitAutograd: special_zeta_out
tags: pointwise
- func: special_i0(Tensor self) -> Tensor
python_module: special
variants: function
- func: special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
variants: function
- func: special_i0e(Tensor self) -> Tensor
python_module: special
variants: function
structured_delegate: special_i0e.out
tags: pointwise
- func: special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: special_i0e_out
tags: pointwise
- func: special_i1(Tensor self) -> Tensor
python_module: special
variants: function
structured_delegate: special_i1.out
tags: pointwise
- func: special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: special_i1_out
tags: pointwise
- func: special_i1e(Tensor self) -> Tensor
python_module: special
variants: function
structured_delegate: special_i1e.out
tags: pointwise
- func: special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
structured: True
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA: special_i1e_out
tags: pointwise
- func: special_logit(Tensor self, float? eps=None) -> Tensor
python_module: special
variants: function
- func: special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
- func: special_polygamma(int n, Tensor self) -> Tensor
python_module: special
variants: function
- func: special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
- func: special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
python_module: special
variants: function
- func: special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
- func: special_expit(Tensor self) -> Tensor
python_module: special
variants: function
- func: special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
variants: function
- func: special_sinc(Tensor self) -> Tensor
python_module: special
variants: function
- func: special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
variants: function
- func: special_round(Tensor self, *, int decimals=0) -> Tensor
python_module: special
variants: function
- func: special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!)
python_module: special
variants: function
- func: special_log1p(Tensor self) -> Tensor
python_module: special
variants: function
- func: special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
variants: function
- func: special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
python_module: special
variants: function
- func: special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
variants: function
- func: special_gammainc(Tensor self, Tensor other) -> Tensor
python_module: special
variants: function
- func: special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
variants: function
- func: special_gammaincc(Tensor self, Tensor other) -> Tensor
python_module: special
variants: function
- func: special_multigammaln(Tensor self, int p) -> Tensor
python_module: special
variants: function
- func: special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
python_module: special
variants: function
- func: special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
python_module: special
variants: function
## Functions related to the fast Fourier transform and the torch.fft namespace
# Note [FFT namespace binding]
# Functions in the fft python module should have their names start with
# "fft_" underscore and be bound to the desired Python name in
# torch/fft/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/fft.h.
# The "fft_" names should be hidden from the user and not documented.
#
# See fft_fft as an example.
# torch.fft.fft
# NOTE: NOT an alias for torch.fft, which has different semantics
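#
# For example, `fft_fft` below is surfaced in Python as `torch.fft.fft`
# (a minimal usage sketch based on the schema below; the binding itself
# lives in torch/fft/__init__.py):
#
#   import torch
#   x = torch.randn(8, dtype=torch.complex64)
#   X = torch.fft.fft(x, n=8, dim=-1, norm="ortho")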
- func: fft_fft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_fft_symint
- func: fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_fft_symint_out
- func: fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_ifft_symint
- func: fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_ifft_symint_out
- func: fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_rfft_symint
- func: fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_rfft_symint_out
- func: fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_irfft_symint
- func: fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_irfft_symint_out
- func: fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_hfft_symint
- func: fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_hfft_symint_out
- func: fft_ihfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_ihfft_symint
- func: fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_ihfft_symint_out
- func: fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_fft2_symint
- func: fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_fft2_symint_out
- func: fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_ifft2_symint
- func: fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_ifft2_symint_out
- func: fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_rfft2_symint
- func: fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_rfft2_symint_out
- func: fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_irfft2_symint
- func: fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_irfft2_symint_out
- func: fft_hfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
use_const_ref_for_mutable_tensors: True
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_hfft2_symint
- func: fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_hfft2_symint_out
- func: fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
use_const_ref_for_mutable_tensors: True
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_ihfft2_symint
- func: fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_ihfft2_symint_out
- func: fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_fftn_symint
- func: fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_fftn_symint_out
- func: fft_ifftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_ifftn_symint
- func: fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_ifftn_symint_out
- func: fft_rfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_rfftn_symint
- func: fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_rfftn_symint_out
- func: fft_irfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_irfftn_symint
- func: fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_irfftn_symint_out
- func: fft_hfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
use_const_ref_for_mutable_tensors: True
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_hfftn_symint
- func: fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_hfftn_symint_out
- func: fft_ihfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
use_const_ref_for_mutable_tensors: True
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_ihfftn_symint
- func: fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
use_const_ref_for_mutable_tensors: True
python_module: fft
variants: function
dispatch:
CompositeImplicitAutograd: fft_ihfftn_symint_out
- func: fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
python_module: fft
variants: function
dispatch:
CompositeExplicitAutograd: fft_fftfreq
- func: fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
python_module: fft
variants: function
dispatch:
CompositeExplicitAutograd: fft_fftfreq_out
- func: fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
python_module: fft
variants: function
dispatch:
CompositeExplicitAutograd: fft_rfftfreq
- func: fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
python_module: fft
variants: function
dispatch:
CompositeExplicitAutograd: fft_rfftfreq_out
- func: fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor
python_module: fft
variants: function
- func: fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor
python_module: fft
variants: function
## Functions for linear algebra and the torch.linalg namespace
# Note [linalg namespace binding]
# Functions in the linalg python module should have their names start with
# "linalg_" and be bound to the desired Python name in
# torch/linalg/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/linalg.h.
# The "linalg_" names should be hidden from the user and not documented.
#
# See linalg_det as an example.
# "_ex" stands for experimental
- func: linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)
python_module: linalg
structured_delegate: linalg_cholesky_ex.L
- func: linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)
python_module: linalg
structured: True
dispatch:
CPU, CUDA: linalg_cholesky_ex_out
- func: linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor
python_module: linalg
- func: linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
- func: linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor
python_module: linalg
variants: function
structured_delegate: linalg_cross.out
dispatch:
ZeroTensor: linalg_cross_zerotensor
- func: linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
structured: True
dispatch:
CPU, CUDA, MPS: linalg_cross_out
# linalg.lu_factor
- func: linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)
python_module: linalg
variants: function
- func: linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)
python_module: linalg
variants: function
- func: linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info)
python_module: linalg
structured_delegate: linalg_lu_factor_ex.out
variants: function
- func: linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)
python_module: linalg
variants: function
structured: True
dispatch:
CPU, CUDA: linalg_lu_factor_ex_out
# linalg.lu
- func: linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)
python_module: linalg
structured_delegate: linalg_lu.out
variants: function
- func: linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
python_module: linalg
variants: function
structured: True
dispatch:
CPU, CUDA: linalg_lu_out
# linalg.lu_solve
- func: linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor
python_module: linalg
structured_delegate: linalg_lu_solve.out
variants: function
- func: linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
variants: function
structured: True
dispatch:
CPU, CUDA: linalg_lu_solve_out
# linalg.det
- func: _linalg_det(Tensor A) -> (Tensor result, Tensor LU, Tensor pivots)
structured_delegate: _linalg_det.result
- func: _linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots)
structured: True
dispatch:
CPU, CUDA: _linalg_det_out
- func: linalg_det(Tensor A) -> Tensor
python_module: linalg
variants: function
- func: linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
# torch.det, alias for torch.linalg.det
- func: det(Tensor self) -> Tensor
variants: function, method
- func: linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)
structured_delegate: linalg_ldl_factor_ex.out
python_module: linalg
variants: function
- func: linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)
structured: True
python_module: linalg
variants: function
dispatch:
CPU, CUDA: linalg_ldl_factor_ex_out
- func: linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots)
python_module: linalg
variants: function
- func: linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)
python_module: linalg
variants: function
- func: linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor
structured_delegate: linalg_ldl_solve.out
python_module: linalg
variants: function
- func: linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
structured: True
python_module: linalg
variants: function
dispatch:
CPU, CUDA: linalg_ldl_solve_out
- func: linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)
python_module: linalg
variants: function
dispatch:
CompositeExplicitAutograd: linalg_lstsq
tags: dynamic_output_shape
- func: linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
python_module: linalg
variants: function
dispatch:
CPU, CUDA: linalg_lstsq_out
tags: dynamic_output_shape
# torch.linalg.matmul, alias for torch.matmul
- func: linalg_matmul(Tensor self, Tensor other) -> Tensor
python_module: linalg
variants: function
- func: linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
- func: linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor
python_module: linalg
variants: function
- func: linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
- func: linalg_matrix_exp(Tensor self) -> Tensor
python_module: linalg
variants: function
dispatch:
CPU, CUDA: linalg_matrix_exp
autogen: linalg_matrix_exp.out
- func: _linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet, Tensor LU, Tensor pivots)
structured_delegate: _linalg_slogdet.sign
- func: _linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots)
structured: True
dispatch:
CPU, CUDA: _linalg_slogdet_out
- func: linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)
python_module: linalg
- func: linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
python_module: linalg
- func: slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
variants: function, method
- func: slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
variants: function
- func: logdet(Tensor self) -> Tensor
variants: function, method
- func: linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)
python_module: linalg
variants: function
dispatch:
CPU, CUDA: linalg_eig
- func: linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
python_module: linalg
dispatch:
CPU, CUDA: linalg_eig_out
- func: _linalg_eigvals(Tensor self) -> Tensor
python_module: linalg
dispatch:
CPU, CUDA: _linalg_eigvals
- func: linalg_eigvals(Tensor self) -> Tensor
python_module: linalg
- func: linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
dispatch:
CPU, CUDA: linalg_eigvals_out
# This function exposes the `compute_v` flag, which is then used to implement `linalg.eigh` and
# `linalg.eigvalsh` as composite functions that call this one
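#
# A minimal usage sketch of the resulting public API (both composites below
# ultimately route through `_linalg_eigh`):
#
#   import torch
#   A = torch.randn(4, 4)
#   A = A + A.mT                      # make A symmetric
#   w, V = torch.linalg.eigh(A)       # eigenvalues and eigenvectors
#   w2 = torch.linalg.eigvalsh(A)     # eigenvalues only; matches w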
- func: _linalg_eigh(Tensor A, str UPLO="L", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors)
structured_delegate: _linalg_eigh.eigenvalues
- func: _linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
structured: True
dispatch:
CPU, CUDA: _linalg_eigh_out
- func: linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors)
python_module: linalg
- func: linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
python_module: linalg
- func: linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor
python_module: linalg
- func: linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
- func: linalg_householder_product(Tensor input, Tensor tau) -> Tensor
python_module: linalg
variants: function
dispatch:
CPU, CUDA: linalg_householder_product
- func: linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
dispatch:
CPU, CUDA: linalg_householder_product_out
- func: linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)
python_module: linalg
structured_delegate: linalg_inv_ex.inverse
- func: linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
python_module: linalg
structured: True
dispatch:
CPU, CUDA: linalg_inv_ex_out
MPS: linalg_inv_ex_out_mps
- func: linalg_inv(Tensor A) -> Tensor
python_module: linalg
- func: linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
- func: inverse(Tensor self) -> Tensor
variants: function, method
- func: inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- func: inner(Tensor self, Tensor other) -> Tensor
variants: function, method
- func: inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- func: outer(Tensor self, Tensor vec2) -> Tensor
variants: function, method
- func: outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
# torch.ger, alias for torch.outer
- func: ger(Tensor self, Tensor vec2) -> Tensor
variants: function, method
- func: ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
- func: linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
python_module: linalg
variants: function
- func: linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
python_module: linalg
variants: function
- func: linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
variants: function
- func: linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
variants: function
- func: linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
python_module: linalg
variants: function
structured_delegate: linalg_vector_norm.out
- func: linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
structured: True
dispatch:
CPU, CUDA: linalg_vector_norm_out
MPS: linalg_vector_norm_out_mps
- func: linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
python_module: linalg
- func: linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
- func: linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
python_module: linalg
- func: linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
# This function exposes the `compute_uv` flag, which is then used to implement `linalg.svd` and
# `linalg.svdvals` as composite functions that call this one
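#
# A minimal usage sketch of the resulting public API (both composites below
# ultimately route through `_linalg_svd`):
#
#   import torch
#   A = torch.randn(5, 3)
#   U, S, Vh = torch.linalg.svd(A, full_matrices=False)
#   S2 = torch.linalg.svdvals(A)      # same singular values, without computing U and Vh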
- func: _linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
variants: function
structured_delegate: _linalg_svd.U
- func: _linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
structured: True
dispatch:
CPU, CUDA: _linalg_svd_out
- func: linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
python_module: linalg
variants: function
- func: linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
python_module: linalg
variants: function
- func: linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor
python_module: linalg
variants: function
- func: linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
variants: function
- func: linalg_cond(Tensor self, Scalar? p=None) -> Tensor
python_module: linalg
variants: function
- func: linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
variants: function
- func: linalg_cond.p_str(Tensor self, str p) -> Tensor
python_module: linalg
variants: function
- func: linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
variants: function
- func: linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
python_module: linalg
variants: function
dispatch:
# calls svd, which calls mH() (view op)
# also calls narrow()
CompositeExplicitAutogradNonFunctional: linalg_pinv
- func: linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
variants: function
dispatch:
CompositeExplicitAutograd: linalg_pinv_out
- func: linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
cpp_no_default_args: ['atol', 'rtol']
python_module: linalg
variants: function
- func: linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
cpp_no_default_args: ['atol', 'rtol']
python_module: linalg
variants: function
- func: linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor
python_module: linalg
variants: function
- func: linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor
python_module: linalg
variants: function
- func: linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
variants: function
- func: linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
variants: function
- func: _linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor LU, Tensor pivots, Tensor info)
structured_delegate: _linalg_solve_ex.result
- func: _linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info)
structured: True
dispatch:
CPU, CUDA: _linalg_solve_ex_out
- func: linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)
python_module: linalg
- func: linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)
python_module: linalg
- func: linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor
python_module: linalg
- func: linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
- func: linalg_tensorinv(Tensor self, int ind=2) -> Tensor
python_module: linalg
variants: function
- func: linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
variants: function
- func: linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor
python_module: linalg
variants: function
- func: linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
variants: function
- func: linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R)
python_module: linalg
variants: function
structured_delegate: linalg_qr.out
- func: linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
python_module: linalg
structured: True
dispatch:
CPU, CUDA: linalg_qr_out
- func: linalg_matrix_power(Tensor self, int n) -> Tensor
python_module: linalg
- func: linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
- func: linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
python_module: linalg
variants: function
- func: linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
variants: function
- func: linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
cpp_no_default_args: ['atol', 'rtol']
python_module: linalg
variants: function
- func: linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
cpp_no_default_args: ['atol', 'rtol']
python_module: linalg
variants: function
- func: linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor
python_module: linalg
variants: function
- func: linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
variants: function
- func: linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor
python_module: linalg
variants: function
- func: linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
variants: function
- func: linalg_multi_dot(Tensor[] tensors) -> Tensor
python_module: linalg
- func: linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
python_module: linalg
## Functions related to the `torch.nested` namespace
# Note [nested namespace binding]
# Functions in the nested python module should have their names start with
# "nested_" underscore and be bound to the desired Python name in
# torch/nested/__init__.py, and the desired C++ name in torch/csrc/api/include/torch/nested.h.
# The "nested_" names should be hidden from the user and not documented.
- func: nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor
python_module: nested
variants: function
## Functions that are only for testing
# They are undocumented and should not be used outside of tests.
- func: _test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor
# Note: for testing copy-on-write (COW) materialization within an `at::parallel_for` loop function
- func: _test_parallel_materialize(Tensor self, int num_parallel, bool skip_first=False) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: _test_parallel_materialize
# Note: this function is only for testing.
- func: _test_optional_intlist(Tensor values, int[]? addends) -> Tensor
python_module: nn
dispatch:
CPU: _test_optional_intlist
autogen: _test_optional_intlist.out
# Note: this function is only for testing.
- func: _test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor
python_module: nn
dispatch:
CPU: _test_optional_intlist
autogen: _test_optional_filled_intlist.out
# Note: this function is only for testing.
- func: _test_optional_floatlist(Tensor values, float[]? addends) -> Tensor
python_module: nn
dispatch:
CPU: _test_optional_floatlist
autogen: _test_optional_floatlist.out
# Note: this function is only for testing.
- func: _test_string_default(Tensor dummy, str a="\"'\\", str b='"\'\\') -> Tensor
python_module: nn
# Note: this function is only for testing.
- func: _test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor
python_module: nn
# Note: this function is only for testing.
- func: _test_ambiguous_defaults.b(Tensor dummy, int a=2, str b="2") -> Tensor
cpp_no_default_args: ['a', 'b']
python_module: nn
# Note: this function is only for testing.
- func: _test_warn_in_autograd(Tensor self) -> Tensor
python_module: nn
dispatch:
CompositeExplicitAutograd: _test_warn_in_autograd
autogen: _test_warn_in_autograd.out
# Note: this function is only for testing.
- func: _test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor
dispatch:
# the NestedTensor keys are necessary because NestedTensor has been removed
# from the CompositeExplicitAutograd keyset; see Note [NestedTensor Not Included in Backend Keys]
CompositeExplicitAutograd, NestedTensorCPU, NestedTensorCUDA: _test_autograd_multiple_dispatch_fullcoverage
autogen: _test_autograd_multiple_dispatch.fullcoverage_out
# Note: this function is only for testing.
- func: _test_autograd_multiple_dispatch.ntonly(Tensor self, bool b) -> Tensor
dispatch:
CompositeImplicitAutograd, NestedTensorCPU, NestedTensorCUDA: _test_autograd_multiple_dispatch_ntonly
# Note: this function is only for testing.
- func: _test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a)
dispatch:
CompositeExplicitAutograd: _test_autograd_multiple_dispatch_view
# Note: this function is only for testing.
- func: _test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: _test_autograd_multiple_dispatch_view_copy
tags: view_copy
autogen: _test_autograd_multiple_dispatch_view_copy.out
- func: segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor
variants: function
dispatch:
CPU, CUDA: segment_reduce_kernel
autogen: segment_reduce.out
- func: _segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None) -> Tensor
variants: function
dispatch:
CPU, CUDA: _segment_reduce_backward_kernel
autogen: _segment_reduce_backward.out
- func: pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor
python_module: nn
variants: function
- func: flatten_dense_tensors(Tensor[] tensors) -> Tensor
variants: function
python_module: nn
- func: unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[]
variants: function
python_module: nn
- func: _nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
variants: function
dispatch:
CompositeExplicitAutograd: _nested_tensor_from_tensor_list
autogen: _nested_tensor_from_tensor_list.out
- func: _fw_primal_copy(Tensor self, int level) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: _fw_primal_copy
tags: view_copy
autogen: _fw_primal_copy.out
- func: _make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: _make_dual_copy
tags: view_copy
autogen: _make_dual_copy.out
- func: view_as_real_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: view_as_real_copy
tags: view_copy
autogen: view_as_real_copy.out
- func: view_as_complex_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: view_as_complex_copy
tags: view_copy
autogen: view_as_complex_copy.out
- func: _conj_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: _conj_copy
tags: view_copy
autogen: _conj_copy.out
- func: _neg_view_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: _neg_view_copy
tags: view_copy
autogen: _neg_view_copy.out
- func: as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: as_strided_copy_symint
tags: view_copy
autogen: as_strided_copy.out
- func: _sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: _sparse_broadcast_to_copy
tags: view_copy
autogen: _sparse_broadcast_to_copy.out
- func: diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: diagonal_copy
tags: view_copy
autogen: diagonal_copy.out
- func: expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: expand_copy_symint
tags: view_copy
autogen: expand_copy.out
- func: permute_copy(Tensor self, int[] dims) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: permute_copy
tags: view_copy
autogen: permute_copy.out
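# The *_copy ops in this block mirror their view counterparts but return
# independent tensors rather than aliases (used e.g. by functionalization).
# Illustrative sketch:
#   import torch
#   x = torch.arange(6).reshape(2, 3)
#   v = x.permute(1, 0)                          # a view; shares storage with x
#   c = torch.ops.aten.permute_copy(x, [1, 0])   # same values, independent storage
#   x.fill_(0)
#   # v now reflects the fill; c still holds the original values.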
- func: _reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: _reshape_alias_copy_symint
tags: view_copy
autogen: _reshape_alias_copy.out
- func: select_copy.int(Tensor self, int dim, SymInt index) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: select_copy_symint
SparseCsrCPU, SparseCsrCUDA: select_copy_sparse_csr
tags: view_copy
autogen: select_copy.int_out
- func: detach_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: detach_copy
tags: view_copy
autogen: detach_copy.out
- func: slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: slice_copy_Tensor_symint
tags: view_copy
autogen: slice_copy.Tensor_out
- func: split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: split_copy_Tensor_symint
tags: view_copy
- func: split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: split_with_sizes_copy_symint
tags: view_copy
- func: squeeze_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: squeeze_copy
tags: view_copy
autogen: squeeze_copy.out
- func: squeeze_copy.dim(Tensor self, int dim) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: squeeze_copy_dim
tags: view_copy
autogen: squeeze_copy.dim_out
- func: squeeze_copy.dims(Tensor self, int[] dim) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: squeeze_copy_dims
tags: view_copy
autogen: squeeze_copy.dims_out
- func: t_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: t_copy
tags: view_copy
autogen: t_copy.out
- func: transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: transpose_copy_int
tags: view_copy
autogen: transpose_copy.int_out
- func: unsqueeze_copy(Tensor self, int dim) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: unsqueeze_copy
tags: view_copy
autogen: unsqueeze_copy.out
- func: _indices_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: _indices_copy
tags: view_copy
autogen: _indices_copy.out
- func: _values_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: _values_copy
tags: view_copy
autogen: _values_copy.out
- func: indices_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: indices_copy
tags: view_copy
autogen: indices_copy.out
- func: values_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: values_copy
tags: view_copy
autogen: values_copy.out
- func: crow_indices_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: crow_indices_copy
tags: view_copy
autogen: crow_indices_copy.out
- func: col_indices_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: col_indices_copy
tags: view_copy
autogen: col_indices_copy.out
- func: ccol_indices_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: ccol_indices_copy
tags: view_copy
autogen: ccol_indices_copy.out
- func: row_indices_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: row_indices_copy
tags: view_copy
autogen: row_indices_copy.out
- func: unbind_copy.int(Tensor self, int dim=0) -> Tensor[]
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: unbind_copy_int
tags: view_copy
- func: unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()
variants: function
dispatch:
CompositeExplicitAutograd: unbind_copy_int_out
- func: split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
variants: function
dispatch:
CompositeExplicitAutograd: split_copy_Tensor_out
- func: split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
variants: function
dispatch:
CompositeExplicitAutograd: split_with_sizes_copy_out
CUDA: split_with_sizes_copy_out_cuda
- func: view_copy(Tensor self, SymInt[] size) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: view_copy_symint
tags: view_copy
autogen: view_copy.out
- func: view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: view_copy_dtype
tags: view_copy
autogen: view_copy.dtype_out
- func: unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: unfold_copy
tags: view_copy
autogen: unfold_copy.out
- func: alias_copy(Tensor self) -> Tensor
variants: function
dispatch:
CompositeExplicitAutogradNonFunctional: alias_copy
tags: view_copy
autogen: alias_copy.out
- func: to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor
variants: method
dispatch:
NestedTensorCPU: NestedTensor_to_padded_tensor_generic
NestedTensorCUDA: NestedTensor_to_padded_tensor_cuda
autogen: to_padded_tensor.out
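# Illustrative usage (a sketch): converts a nested tensor into a dense, padded one.
#   import torch
#   nt = torch.nested.nested_tensor([torch.ones(2), torch.ones(3)])
#   padded = nt.to_padded_tensor(0.0)
#   # padded has shape (2, 3); the shorter row is padded with 0.0.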
- func: _nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor
dispatch:
NestedTensorCPU: NestedTensor_softmax_dropout
NestedTensorCUDA: NestedTensor_softmax_dropout_cuda
tags: nondeterministic_seeded
# Apparently, putting "forward" in the name will cause Python bindings to be skipped, so "fwd" it is.
- func: _transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor
variants: function
dispatch:
CPU, CUDA, NestedTensorCPU, NestedTensorCUDA: transformer_encoder_layer_forward
autogen: _transformer_encoder_layer_fwd.out
- func: _native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)
variants: function
dispatch:
CPU, NestedTensorCPU: native_multi_head_attention_cpu
CUDA, NestedTensorCUDA: native_multi_head_attention_cuda
autogen: _native_multi_head_attention.out
- func: scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> Tensor
python_module: nn
variants: function
autogen: scaled_dot_product_attention.out
tags: nondeterministic_seeded
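# Illustrative usage (a sketch): this composite entry point selects one of the
# fused backends below (flash, memory-efficient, cuDNN) or the math fallback.
#   import torch
#   import torch.nn.functional as F
#   q = torch.randn(2, 8, 128, 64)   # (batch, heads, seq_len, head_dim)
#   k = torch.randn(2, 8, 128, 64)
#   v = torch.randn(2, 8, 128, 64)
#   out = F.scaled_dot_product_attention(q, k, v, is_causal=True)
#   # out has shape (2, 8, 128, 64)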
# This aten function is kept so that we can test the choice function from Python
- func: _fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> int
dispatch:
Meta: _fused_sdp_choice_meta
CPU, NestedTensorCPU: _fused_sdp_choice_cpp
CUDA, NestedTensorCUDA: _fused_sdp_choice_cuda
tags: nondeterministic_seeded
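# Illustrative usage (a sketch): returns an integer identifying the SDPA backend
# that would be chosen for the given inputs; the integer-to-backend mapping is
# an implementation detail.
#   import torch
#   q = k = v = torch.randn(2, 8, 32, 64)
#   backend_id = torch.ops.aten._fused_sdp_choice(q, k, v)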
- func: _scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None, *, float? scale=None) -> (Tensor, Tensor)
variants: function
tags: nondeterministic_seeded
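# In outline, the math fallback computes the attention entirely in plain ATen
# ops and returns both the attention output and the attention weights:
#   attn = softmax(query @ key.transpose(-2, -1) * scale + attn_mask, dim=-1)
#   output = dropout(attn, dropout_p) @ value   # (output, attn) is returned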
- func: _scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)
dispatch:
CUDA: _scaled_dot_product_flash_attention_cuda
NestedTensorCUDA: _scaled_dot_product_flash_attention_nestedtensor_cuda
tags: nondeterministic_seeded
- func: _scaled_dot_product_flash_attention_for_cpu(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, *, Tensor? attn_mask=None, float? scale=None) -> (Tensor output, Tensor logsumexp)
dispatch:
CPU: _scaled_dot_product_flash_attention_cpu
tags: nondeterministic_seeded
- func: _scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)
device_check: NoCheck
variants: function
dispatch:
CUDA: _scaled_dot_product_flash_attention_backward_cuda
NestedTensorCUDA: _scaled_dot_product_flash_attention_backward_nested
- func: _scaled_dot_product_flash_attention_for_cpu_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, float dropout_p, bool is_causal, *, Tensor? attn_mask=None, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)
device_check: NoCheck
variants: function
dispatch:
CPU: _scaled_dot_product_flash_attention_cpu_backward
- func: _scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> (Tensor output, Tensor log_sumexp, Tensor philox_seed, Tensor philox_offset)
dispatch:
CUDA: _scaled_dot_product_efficient_attention_cuda
NestedTensorCUDA: _scaled_dot_product_efficient_attention_nestedtensor_cuda
tags: nondeterministic_seeded
- func: _scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor attn_bias, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, float dropout_p, bool[4] grad_input_mask, bool is_causal=False, *, float? scale=None) -> (Tensor, Tensor, Tensor, Tensor)
device_check: NoCheck
dispatch:
CUDA: _scaled_dot_product_efficient_attention_backward_cuda
tags: nondeterministic_seeded
- func: _scaled_dot_product_cudnn_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset)
dispatch:
CUDA: _scaled_dot_product_cudnn_attention_cuda
tags: nondeterministic_seeded
- func: _flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cum_seq_q, Tensor? cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, bool return_debug_mask, *, float? scale=None) -> (Tensor output, Tensor softmax_logsumexp, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)
variants: function
dispatch:
CUDA: _flash_attention_forward
tags: nondeterministic_seeded
- func: _flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor, Tensor, Tensor)
device_check: NoCheck
variants: function
dispatch:
CUDA: _flash_attention_backward
# Returns output, and logsumexp if compute_log_sumexp=True
- func: _efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, int? max_seqlen_q, int? max_seqlen_k, float dropout_p, int custom_mask_type, bool compute_log_sumexp=False, *, float? scale=None, Tensor? causal_diagonal=None, Tensor? seqlen_k=None) -> (Tensor output, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, SymInt max_seqlen_batch_q, SymInt max_seqlen_batch_k)
variants: function
dispatch:
CUDA: _efficient_attention_forward
tags: nondeterministic_seeded
- func: _efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor out, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, SymInt max_seqlen_q, SymInt max_seqlen_k, Tensor logsumexp, float dropout_p, Tensor philox_seed, Tensor philox_offset, int custom_mask_type, bool bias_requires_grad, *, float? scale=None, int? num_splits_key=None) -> (Tensor, Tensor, Tensor, Tensor)
device_check: NoCheck
variants: function
dispatch:
CUDA: _efficient_attention_backward
- func: _triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor
variants: function
dispatch:
CUDA: triton_scaled_dot_attention
tags: nondeterministic_seeded
autogen: _triton_scaled_dot_attention.out
- func: _fill_mem_eff_dropout_mask_(Tensor(a!) self, float dropout_p, int seed, int offset) -> Tensor(a!)
variants: function
dispatch:
CUDA: _fill_mem_eff_dropout_mask_
tags: nondeterministic_seeded
- func: _triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor
variants: function
dispatch:
CUDA: triton_multi_head_attention
autogen: _triton_multi_head_attention.out
- func: special_airy_ai(Tensor x) -> Tensor
python_module: special
structured_delegate: special_airy_ai.out
variants: function
tags: pointwise
- func: special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_airy_ai_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
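# Illustrative usage (a sketch): the special_* ops are exposed under
# torch.special, with the structured .out overload backing the functional form.
#   import torch
#   x = torch.linspace(-1.0, 1.0, 5)
#   y = torch.special.airy_ai(x)
#   out = torch.empty_like(x)
#   torch.special.airy_ai(x, out=out)   # writes the result into `out`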
- func: special_bessel_j0(Tensor self) -> Tensor
python_module: special
structured_delegate: special_bessel_j0.out
variants: function
tags: pointwise
- func: special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_bessel_j0_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_bessel_j1(Tensor self) -> Tensor
python_module: special
structured_delegate: special_bessel_j1.out
variants: function
tags: pointwise
- func: special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_bessel_j1_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_bessel_y0(Tensor self) -> Tensor
python_module: special
structured_delegate: special_bessel_y0.out
variants: function
tags: pointwise
- func: special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_bessel_y0_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_bessel_y1(Tensor self) -> Tensor
python_module: special
structured_delegate: special_bessel_y1.out
variants: function
tags: pointwise
- func: special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_bessel_y1_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_chebyshev_polynomial_t.out
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_chebyshev_polynomial_t
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_chebyshev_polynomial_t
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_chebyshev_polynomial_t_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_chebyshev_polynomial_t_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_chebyshev_polynomial_t_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
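# Illustrative usage (a sketch): the polynomial families take Tensor/Tensor
# arguments, with Scalar overloads for either argument.
#   import torch
#   x = torch.linspace(-1.0, 1.0, 5)
#   n = torch.full_like(x, 3.0)
#   torch.special.chebyshev_polynomial_t(x, n)    # T_3 evaluated at each x
#   torch.special.chebyshev_polynomial_t(x, 3.0)  # n_scalar overload
#   torch.special.chebyshev_polynomial_t(0.5, n)  # x_scalar overload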
- func: special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_chebyshev_polynomial_u.out
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_chebyshev_polynomial_u
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_chebyshev_polynomial_u
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_chebyshev_polynomial_u_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_chebyshev_polynomial_u_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_chebyshev_polynomial_u_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_chebyshev_polynomial_v.out
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_chebyshev_polynomial_v
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_chebyshev_polynomial_v
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_chebyshev_polynomial_v_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_chebyshev_polynomial_v_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_chebyshev_polynomial_v_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_chebyshev_polynomial_w.out
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_chebyshev_polynomial_w
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_chebyshev_polynomial_w
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_chebyshev_polynomial_w_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_chebyshev_polynomial_w_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_chebyshev_polynomial_w_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_hermite_polynomial_h.out
variants: function
tags: pointwise
- func: special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_hermite_polynomial_h
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_hermite_polynomial_h
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_hermite_polynomial_h_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_hermite_polynomial_h_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_hermite_polynomial_h_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_hermite_polynomial_he.out
variants: function
tags: pointwise
- func: special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_hermite_polynomial_he
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_hermite_polynomial_he
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_hermite_polynomial_he_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_hermite_polynomial_he_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_hermite_polynomial_he_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_laguerre_polynomial_l.out
variants: function
tags: pointwise
- func: special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_laguerre_polynomial_l
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_laguerre_polynomial_l
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_laguerre_polynomial_l_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_laguerre_polynomial_l_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_laguerre_polynomial_l_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_legendre_polynomial_p.out
variants: function
tags: pointwise
- func: special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_legendre_polynomial_p
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_legendre_polynomial_p
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_legendre_polynomial_p_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_legendre_polynomial_p_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_legendre_polynomial_p_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_modified_bessel_i0(Tensor self) -> Tensor
python_module: special
structured_delegate: special_modified_bessel_i0.out
variants: function
tags: pointwise
- func: special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_modified_bessel_i0_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_modified_bessel_i1(Tensor self) -> Tensor
python_module: special
structured_delegate: special_modified_bessel_i1.out
variants: function
tags: pointwise
- func: special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_modified_bessel_i1_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_modified_bessel_k0(Tensor self) -> Tensor
python_module: special
structured_delegate: special_modified_bessel_k0.out
variants: function
tags: pointwise
- func: special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_modified_bessel_k0_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_modified_bessel_k1(Tensor self) -> Tensor
python_module: special
structured_delegate: special_modified_bessel_k1.out
variants: function
tags: pointwise
- func: special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_modified_bessel_k1_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_scaled_modified_bessel_k0(Tensor x) -> Tensor
python_module: special
structured_delegate: special_scaled_modified_bessel_k0.out
variants: function
tags: pointwise
- func: special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_scaled_modified_bessel_k0_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_scaled_modified_bessel_k1(Tensor x) -> Tensor
python_module: special
structured_delegate: special_scaled_modified_bessel_k1.out
variants: function
tags: pointwise
- func: special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_scaled_modified_bessel_k1_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_shifted_chebyshev_polynomial_t.out
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_t
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_t
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_shifted_chebyshev_polynomial_t_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_t_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_t_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_shifted_chebyshev_polynomial_u.out
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_u
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_u
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_shifted_chebyshev_polynomial_u_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_u_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_u_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_shifted_chebyshev_polynomial_v.out
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_v
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_v
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_shifted_chebyshev_polynomial_v_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_v_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_v_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
device_check: NoCheck
python_module: special
structured_delegate: special_shifted_chebyshev_polynomial_w.out
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_w
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
dispatch:
CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_w
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck
dispatch:
CPU, CUDA: special_shifted_chebyshev_polynomial_w_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_w_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: special_shifted_chebyshev_polynomial_w_out
device_check: NoCheck
python_module: special
variants: function
tags: pointwise
- func: special_spherical_bessel_j0(Tensor x) -> Tensor
python_module: special
structured_delegate: special_spherical_bessel_j0.out
variants: function
tags: pointwise
- func: special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU, CUDA: special_spherical_bessel_j0_out
python_module: special
structured_inherits: TensorIteratorBase
structured: True
variants: function
tags: pointwise
# Aux function used in the test TestPythonDispatch.test_kwarg_only_and_positional_default
# within test/test_python_dispatch.py
- func: _foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor
dispatch:
CPU: foobar
autogen: _foobar.out
# Fused Optimizer CUDA kernels.
- func: _fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
# Unlike "foreach" functions, lists of tensors should be guaranteed to be on the same device (for now).
variants: function
dispatch:
CUDA: _fused_adam_kernel_cuda_
autogen: _fused_adam, _fused_adam.out
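# Illustrative usage (a sketch): these kernels back the `fused=True` path of the
# Python optimizers rather than being called directly.
#   import torch
#   model = torch.nn.Linear(4, 4, device="cuda")
#   opt = torch.optim.Adam(model.parameters(), lr=1e-3, fused=True)
#   loss = model(torch.randn(2, 4, device="cuda")).sum()
#   loss.backward()
#   opt.step()   # dispatches to _fused_adam_ on CUDA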
- func: _fused_adam_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
# Unlike "foreach" functions, lists of tensors should be guaranteed to be on the same device (for now),
# but still skip the device check as the Tensor LR can be on CPU
device_check: NoCheck
variants: function
dispatch:
CUDA: _fused_adam_kernel_cuda_
autogen: _fused_adam.tensor_lr, _fused_adam.tensor_lr_out
- func: _fused_adamw_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
# Unlike "foreach" functions, lists of tensors should be guaranteed to be on the same device (for now).
variants: function
dispatch:
CUDA: _fused_adamw_kernel_cuda_
autogen: _fused_adamw, _fused_adamw.out
- func: _fused_adamw_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
# Unlike "foreach" functions, lists of tensors should be guaranteed to be on the same device (for now),
# but still skip the device check as the Tensor LR can be on CPU
device_check: NoCheck
variants: function
dispatch:
CUDA: _fused_adamw_kernel_cuda_
autogen: _fused_adamw.tensor_lr, _fused_adamw.tensor_lr_out
- func: _fused_sgd_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
# Unlike "foreach" functions, lists of tensors should be guaranteed to be on the same device (for now).
variants: function
dispatch:
CUDA: _fused_sgd_kernel_cuda_
autogen: _fused_sgd, _fused_sgd.out
- func: _fused_sgd_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
# Unlike "foreach" functions, lists of tensors should be guaranteed to be on the same device (for now).
# but still skip the device check as the Tensor LR can be on CPU
device_check: NoCheck
variants: function
dispatch:
CUDA: _fused_sgd_kernel_cuda_
autogen: _fused_sgd.tensor_lr, _fused_sgd.tensor_lr_out
# This op is ONLY used by pytorch/XLA in functionalization, and should never show up in vanilla eager mode or in any pytorch tracing context.
- func: _propagate_xla_data(Tensor input, Tensor output) -> ()
variants: function