hip | cuda
---|---|
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/common_gpu.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/tensor_protos_db_input.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(TensorProtosDBInput, TensorProtosDBInput<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/tensor_protos_db_input.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(TensorProtosDBInput, TensorProtosDBInput<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/thresholded_relu_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void ThresholdedReluKernel(const int N, const T* X, T* Y, T alpha_) {
HIP_1D_KERNEL_LOOP(i, N) {
Y[i] = X[i] > alpha_ ? X[i] : 0;
}
}
template <typename T>
__global__ void
ThresholdedReluGradientKernel(const int N, const T* Y, const T* dY, T* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
dX[i] = Y[i] > 0 ? dY[i] : 0;
}
}
} // namespace
template <>
bool ThresholdedReluOp<float, HIPContext>::RunOnDevice() {
auto& X = Input(0);
CAFFE_ENFORCE_GT(X.numel(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( ThresholdedReluKernel),
dim3(CAFFE_GET_BLOCKS(X.numel())),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(),
X.numel(), X.data<float>(), Y->template mutable_data<float>(), alpha_);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool ThresholdedReluGradientOp<float, HIPContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
CAFFE_ENFORCE_GT(Y.numel(), 0);
CAFFE_ENFORCE_EQ(dY.numel(), Y.numel());
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( ThresholdedReluGradientKernel),
dim3(CAFFE_GET_BLOCKS(Y.numel())),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(),
Y.numel(),
Y.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(ThresholdedRelu, ThresholdedReluOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(
ThresholdedReluGradient,
ThresholdedReluGradientOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/thresholded_relu_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void ThresholdedReluKernel(const int N, const T* X, T* Y, T alpha_) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = X[i] > alpha_ ? X[i] : 0;
}
}
template <typename T>
__global__ void
ThresholdedReluGradientKernel(const int N, const T* Y, const T* dY, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = Y[i] > 0 ? dY[i] : 0;
}
}
} // namespace
template <>
bool ThresholdedReluOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
CAFFE_ENFORCE_GT(X.numel(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
ThresholdedReluKernel<<<
CAFFE_GET_BLOCKS(X.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.numel(), X.data<float>(), Y->template mutable_data<float>(), alpha_);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool ThresholdedReluGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
CAFFE_ENFORCE_GT(Y.numel(), 0);
CAFFE_ENFORCE_EQ(dY.numel(), Y.numel());
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
ThresholdedReluGradientKernel<<<
CAFFE_GET_BLOCKS(Y.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
Y.numel(),
Y.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(ThresholdedRelu, ThresholdedReluOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
ThresholdedReluGradient,
ThresholdedReluGradientOp<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/transpose_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(Transpose, TransposeOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/operators/transpose_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(Transpose, TransposeOp<CUDAContext>);
} // namespace caffe2
### |
#include "hip/hip_runtime.h"
#include "caffe2/operators/unique_ops.h"
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/system/hip/execution_policy.h>
#include <thrust/unique.h>
#include <thrust/version.h>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
#if THRUST_VERSION >= 100800
namespace {
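// Each thread i owns the i-th group of equal keys in the sorted input:
// second_order[i] marks where that group starts, order maps sorted positions
// back to original positions, and the loop writes the unique index i into
// output at every original position belonging to the group.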
__global__ void remap_kernel(
thrust::device_ptr<int> second_order, thrust::device_ptr<int> order, int* output, int N, int K) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= K)
return;
int idx = second_order[i];
output[order[idx]] = i;
for (idx++; idx < N && (i == K - 1 || idx != second_order[i + 1]); idx++) {
output[order[idx]] = i;
}
return;
}
}
template <>
template <typename T>
bool UniqueOp<HIPContext>::DoRunWithType() {
auto& inputTensor = Input(0);
int N = inputTensor.dim32(0);
CAFFE_ENFORCE_EQ(inputTensor.dim(), 1, "Input should be a vector");
int* remapping = nullptr;
if (REMAPPING < OutputSize()) {
auto* remappingTensor =
Output(REMAPPING, inputTensor.sizes(), at::dtype<int>());
remapping = remappingTensor->template mutable_data<int>();
}
if (N <= 0) {
Output(UNIQUE, {0}, at::dtype<T>());
return true;
}
const T* input = inputTensor.template data<T>();
ReinitializeTensor(&thrust_unique_buffer_, {N}, at::dtype<T>().device(HIP));
auto* buffer = thrust_unique_buffer_.template mutable_data<T>();
context_.CopyItemsSameDevice(inputTensor.meta(), N, input, buffer);
thrust::device_vector<int> order1(N), order2(N);
thrust::sequence(
thrust::hip::par.on(context_.hip_stream()), order1.begin(), order1.end());
thrust::sequence(
thrust::hip::par.on(context_.hip_stream()), order2.begin(), order2.end());
thrust::sort_by_key(
thrust::hip::par.on(context_.hip_stream()), buffer, buffer + N, order1.begin());
auto new_last = thrust::unique_by_key(
thrust::hip::par.on(context_.hip_stream()), buffer, buffer + N, order2.begin());
int K = new_last.first - buffer;
auto* uniqueTensor = Output(UNIQUE, {K}, at::dtype<T>());
T* unique = uniqueTensor->template mutable_data<T>();
context_.CopyItemsSameDevice(thrust_unique_buffer_.meta(), K, buffer, unique);
if (remapping != nullptr) {
hipLaunchKernelGGL(( remap_kernel), dim3(CAFFE_GET_BLOCKS(K)), dim3(CAFFE_HIP_NUM_THREADS), 0, context_.hip_stream(), order2.data(), order1.data(), remapping, N, K);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
return true;
}
REGISTER_HIP_OPERATOR(Unique, UniqueOp<HIPContext>);
#endif
}
### |
#include "caffe2/operators/unique_ops.h"
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/system/cuda/execution_policy.h>
#include <thrust/unique.h>
#include <thrust/version.h>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
#if THRUST_VERSION >= 100800
namespace {
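// Each thread i owns the i-th group of equal keys in the sorted input:
// second_order[i] marks where that group starts, order maps sorted positions
// back to original positions, and the loop writes the unique index i into
// output at every original position belonging to the group.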
__global__ void remap_kernel(
thrust::device_ptr<int> second_order, thrust::device_ptr<int> order, int* output, int N, int K) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= K)
return;
int idx = second_order[i];
output[order[idx]] = i;
for (idx++; idx < N && (i == K - 1 || idx != second_order[i + 1]); idx++) {
output[order[idx]] = i;
}
return;
}
}
template <>
template <typename T>
bool UniqueOp<CUDAContext>::DoRunWithType() {
auto& inputTensor = Input(0);
int N = inputTensor.dim32(0);
CAFFE_ENFORCE_EQ(inputTensor.dim(), 1, "Input should be a vector");
int* remapping = nullptr;
if (REMAPPING < OutputSize()) {
auto* remappingTensor =
Output(REMAPPING, inputTensor.sizes(), at::dtype<int>());
remapping = remappingTensor->template mutable_data<int>();
}
if (N <= 0) {
Output(UNIQUE, {0}, at::dtype<T>());
return true;
}
const T* input = inputTensor.template data<T>();
ReinitializeTensor(&thrust_unique_buffer_, {N}, at::dtype<T>().device(CUDA));
auto* buffer = thrust_unique_buffer_.template mutable_data<T>();
context_.CopyItemsSameDevice(inputTensor.meta(), N, input, buffer);
thrust::device_vector<int> order1(N), order2(N);
thrust::sequence(
thrust::cuda::par.on(context_.cuda_stream()), order1.begin(), order1.end());
thrust::sequence(
thrust::cuda::par.on(context_.cuda_stream()), order2.begin(), order2.end());
thrust::sort_by_key(
thrust::cuda::par.on(context_.cuda_stream()), buffer, buffer + N, order1.begin());
auto new_last = thrust::unique_by_key(
thrust::cuda::par.on(context_.cuda_stream()), buffer, buffer + N, order2.begin());
int K = new_last.first - buffer;
auto* uniqueTensor = Output(UNIQUE, {K}, at::dtype<T>());
T* unique = uniqueTensor->template mutable_data<T>();
context_.CopyItemsSameDevice(thrust_unique_buffer_.meta(), K, buffer, unique);
if (remapping != nullptr) {
remap_kernel<<<
CAFFE_GET_BLOCKS(K), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
order2.data(), order1.data(), remapping, N, K);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
return true;
}
REGISTER_CUDA_OPERATOR(Unique, UniqueOp<CUDAContext>);
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/unsafe_coalesce.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(UnsafeCoalesce, UnsafeCoalesceOp<HIPContext>);
}
### |
#include "caffe2/operators/unsafe_coalesce.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(UnsafeCoalesce, UnsafeCoalesceOp<CUDAContext>);
}
### |
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "caffe2/core/context.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/core/flags.h"
#include "caffe2/operators/utility_ops.h"
#include <gtest/gtest.h>
C10_DECLARE_string(caffe_test_root);
namespace caffe2 {
static void AddConstInput(
const vector<int64_t>& shape,
const float value,
const string& name,
Workspace* ws) {
DeviceOption option;
option.set_device_type(PROTO_HIP);
HIPContext context(option);
Blob* blob = ws->CreateBlob(name);
auto* tensor = BlobGetMutableTensor(blob, HIP);
tensor->Resize(shape);
math::Set<float, HIPContext>(
tensor->numel(), value, tensor->template mutable_data<float>(), &context);
return;
}
TEST(UtilityOpGPUTest, testReshapeWithScalar) {
if (!HasHipGPU())
return;
Workspace ws;
OperatorDef def;
def.set_name("test_reshape");
def.set_type("Reshape");
def.add_input("X");
def.add_output("XNew");
def.add_output("OldShape");
def.add_arg()->CopyFrom(MakeArgument("shape", vector<int64_t>{1}));
def.mutable_device_option()->set_device_type(PROTO_HIP);
AddConstInput(vector<int64_t>(), 3.14, "X", &ws);
// execute the op
unique_ptr<OperatorBase> op(CreateOperator(def, &ws));
EXPECT_TRUE(op->Run());
Blob* XNew = ws.GetBlob("XNew");
const Tensor& XNewTensor = XNew->Get<Tensor>();
EXPECT_EQ(1, XNewTensor.dim());
EXPECT_EQ(1, XNewTensor.numel());
}
} // namespace caffe2
### |
#include <iostream>
#include "caffe2/core/context.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/flags.h"
#include "caffe2/operators/utility_ops.h"
#include <gtest/gtest.h>
C10_DECLARE_string(caffe_test_root);
namespace caffe2 {
static void AddConstInput(
const vector<int64_t>& shape,
const float value,
const string& name,
Workspace* ws) {
DeviceOption option;
option.set_device_type(PROTO_CUDA);
CUDAContext context(option);
Blob* blob = ws->CreateBlob(name);
auto* tensor = BlobGetMutableTensor(blob, CUDA);
tensor->Resize(shape);
math::Set<float, CUDAContext>(
tensor->numel(), value, tensor->template mutable_data<float>(), &context);
return;
}
TEST(UtilityOpGPUTest, testReshapeWithScalar) {
if (!HasCudaGPU())
return;
Workspace ws;
OperatorDef def;
def.set_name("test_reshape");
def.set_type("Reshape");
def.add_input("X");
def.add_output("XNew");
def.add_output("OldShape");
def.add_arg()->CopyFrom(MakeArgument("shape", vector<int64_t>{1}));
def.mutable_device_option()->set_device_type(PROTO_CUDA);
AddConstInput(vector<int64_t>(), 3.14, "X", &ws);
// execute the op
unique_ptr<OperatorBase> op(CreateOperator(def, &ws));
EXPECT_TRUE(op->Run());
Blob* XNew = ws.GetBlob("XNew");
const Tensor& XNewTensor = XNew->Get<Tensor>();
EXPECT_EQ(1, XNewTensor.dim());
EXPECT_EQ(1, XNewTensor.numel());
}
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/while_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(While, WhileOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/operators/while_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(While, WhileOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/zero_gradient_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(ZeroGradient, ZeroGradientOp<HIPContext>);
}
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/zero_gradient_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(ZeroGradient, ZeroGradientOp<CUDAContext>);
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/rnn/recurrent_network_blob_fetcher_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(
RecurrentNetworkBlobFetcher,
RecurrentNetworkBlobFetcherOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/rnn/recurrent_network_blob_fetcher_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(
RecurrentNetworkBlobFetcher,
RecurrentNetworkBlobFetcherOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#ifndef CAFFE2_OPERATORS_RECURRENT_NETWORK_GPU_EXECUTOR_H_
#define CAFFE2_OPERATORS_RECURRENT_NETWORK_GPU_EXECUTOR_H_
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/rnn/recurrent_network_executor.h"
#include <map>
namespace caffe2 {
class HIPRecurrentNetworkExecutor : public RecurrentNetworkExecutorBase {
public:
HIPRecurrentNetworkExecutor(
const NetDef& step_net_def,
std::map<string, string>& recurrent_input_map,
std::string timestep_blob)
: RecurrentNetworkExecutorBase(step_net_def, recurrent_input_map, timestep_blob) {}
~HIPRecurrentNetworkExecutor();
protected:
bool Run(int T) override;
bool RunBackwards(int T) override;
bool ignoreLinkDependencies() override {
return true;
}
void AnalyzeOps() override {
/**
* Check if there is an op that only depends on ops from the previous
* timestep, and that op is not the last op. Then we can start computation
* in subsequent timesteps before the whole previous timestep has finished.
* If there is no parallelism, we can avoid overhead of event-based
* dependency management.
*/
has_timestep_parallelism_ = false;
for (auto& rnn_op : timestep_ops_template_) {
int i = rnn_op.order;
if (rnn_op.parents.size() >= 1 && i < timestep_ops_template_.size() - 1) {
bool only_recurrent_deps = std::all_of(
rnn_op.parents.begin(),
rnn_op.parents.end(), [&](const int &parent) {
return parent > i;
}
);
if (only_recurrent_deps) {
VLOG(1) << "Timestep parallel op: " << ProtoDebugString(step_net_def_.op(i));
has_timestep_parallelism_ = true;
for (int dep : rnn_op.parents) {
if (dep == timestep_ops_template_.size() - 1) {
// This op depends on the last op of the previous iteration,
// so it will block any parallelism
has_timestep_parallelism_ = false;
break;
}
}
break;
}
}
}
LOG(INFO) << "Analyzed ops for timestep parallelism: " << has_timestep_parallelism_;
}
public:
void setMaxStreams(int n) {
max_hip_streams_ = n;
}
private:
void _ExecRange(int from, int to);
std::vector<hipEvent_t> events_;
bool has_timestep_parallelism_ = false;
int max_hip_streams_ = 2;
};
}
#endif
### |
#ifndef CAFFE2_OPERATORS_RECURRENT_NETWORK_GPU_EXECUTOR_H_
#define CAFFE2_OPERATORS_RECURRENT_NETWORK_GPU_EXECUTOR_H_
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/rnn/recurrent_network_executor.h"
#include <map>
namespace caffe2 {
class CUDARecurrentNetworkExecutor : public RecurrentNetworkExecutorBase {
public:
CUDARecurrentNetworkExecutor(
const NetDef& step_net_def,
std::map<string, string>& recurrent_input_map,
std::string timestep_blob)
: RecurrentNetworkExecutorBase(step_net_def, recurrent_input_map, timestep_blob) {}
~CUDARecurrentNetworkExecutor();
protected:
bool Run(int T) override;
bool RunBackwards(int T) override;
bool ignoreLinkDependencies() override {
return true;
}
void AnalyzeOps() override {
/**
* Check if there is an op that only depends on ops from the previous
* timestep, and that op is not the last op. Then we can start computation
* in subsequent timesteps before the whole previous timestep has finished.
* If there is no parallelism, we can avoid overhead of event-based
* dependency management.
*/
has_timestep_parallelism_ = false;
for (auto& rnn_op : timestep_ops_template_) {
int i = rnn_op.order;
if (rnn_op.parents.size() >= 1 && i < timestep_ops_template_.size() - 1) {
bool only_recurrent_deps = std::all_of(
rnn_op.parents.begin(),
rnn_op.parents.end(), [&](const int &parent) {
return parent > i;
}
);
if (only_recurrent_deps) {
VLOG(1) << "Timestep parallel op: " << ProtoDebugString(step_net_def_.op(i));
has_timestep_parallelism_ = true;
for (int dep : rnn_op.parents) {
if (dep == timestep_ops_template_.size() - 1) {
// This op depends on the last op of the previous iteration,
// so it will block any parallelism
has_timestep_parallelism_ = false;
break;
}
}
break;
}
}
}
LOG(INFO) << "Analyzed ops for timestep parallelism: " << has_timestep_parallelism_;
}
public:
void setMaxStreams(int n) {
max_cuda_streams_ = n;
}
private:
void _ExecRange(int from, int to);
std::vector<cudaEvent_t> events_;
bool has_timestep_parallelism_ = false;
int max_cuda_streams_ = 2;
};
}
#endif
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/hip/ApplyGridUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/Loops.cuh>
namespace at::native {
namespace {
template <typename scalar_t>
void threshold_kernel_impl(
TensorIteratorBase& iter,
scalar_t threshold,
scalar_t value) {
gpu_kernel_with_scalars(
iter, [=] GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t {
return x <= threshold ? value : other;
});
}
static void threshold_kernel_hip(
TensorIteratorBase& iter,
const Scalar& threshold,
const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"threshold_hip",
[&] {
threshold_kernel_impl<scalar_t>(
iter, threshold.to<scalar_t>(), value.to<scalar_t>());
});
}
} // namespace
REGISTER_DISPATCH(threshold_stub, &threshold_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/cuda/ApplyGridUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>
namespace at::native {
namespace {
template <typename scalar_t>
void threshold_kernel_impl(
TensorIteratorBase& iter,
scalar_t threshold,
scalar_t value) {
gpu_kernel_with_scalars(
iter, [=] GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t {
return x <= threshold ? value : other;
});
}
static void threshold_kernel_cuda(
TensorIteratorBase& iter,
const Scalar& threshold,
const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"threshold_cuda",
[&] {
threshold_kernel_impl<scalar_t>(
iter, threshold.to<scalar_t>(), value.to<scalar_t>());
});
}
} // namespace
REGISTER_DISPATCH(threshold_stub, &threshold_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/rnn/recurrent_network_op.h"
namespace caffe2 {
namespace detail {
template <typename T, typename Context>
void initializeRecurrentInput(
const RecurrentInput& rc,
int32_t seqLen,
int32_t batchSize,
Workspace* ws,
Context* context);
namespace {
template <typename T>
__global__
void initRecurrentInput_kernel(
size_t stateSize,
const T* input,
T* state) {
// index into appropriate target buffer
const int block_id = blockIdx.x;
T* state_local = state + block_id*stateSize;
// copy
for (int idx=threadIdx.x; idx < stateSize; idx+=blockDim.x) {
state_local[idx] = input[idx];
}
}
}; // namespace
template <>
void repeatCopy(
size_t repeat_n,
size_t n,
const float* src,
float* dst,
HIPContext* context) {
hipLaunchKernelGGL(( initRecurrentInput_kernel<float>), dim3(repeat_n), dim3(CAFFE_HIP_NUM_THREADS), 0, context->hip_stream(),
n, src, dst);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template <>
void repeatCopy(
size_t repeat_n,
size_t n,
const at::Half* src,
at::Half* dst,
HIPContext* context) {
hipLaunchKernelGGL(( initRecurrentInput_kernel<at::Half>), dim3(repeat_n), dim3(CAFFE_HIP_NUM_THREADS), 0, context->hip_stream(),
n, src, dst);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}; // namespace detail
template <>
bool RecurrentNetworkOp<HIPContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(0));
}
template <>
bool RecurrentNetworkGradientOp<HIPContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(0));
}
template <>
bool AccumulateInputGradientOp<HIPContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(1));
}
template <>
bool RNNApplyLinkOp<HIPContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(1));
}
REGISTER_HIP_OPERATOR(
RecurrentNetwork,
RecurrentNetworkOp<HIPContext>);
REGISTER_HIP_OPERATOR(
RecurrentNetworkGradient,
RecurrentNetworkGradientOp<HIPContext>);
REGISTER_HIP_OPERATOR(
rnn_internal_accumulate_gradient_input,
AccumulateInputGradientOp<HIPContext>);
REGISTER_HIP_OPERATOR(
rnn_internal_apply_link,
RNNApplyLinkOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/rnn/recurrent_network_op.h"
namespace caffe2 {
namespace detail {
template <typename T, typename Context>
void initializeRecurrentInput(
const RecurrentInput& rc,
int32_t seqLen,
int32_t batchSize,
Workspace* ws,
Context* context);
namespace {
template <typename T>
__global__
void initRecurrentInput_kernel(
size_t stateSize,
const T* input,
T* state) {
// index into appropriate target buffer
const int block_id = blockIdx.x;
T* state_local = state + block_id*stateSize;
// copy
for (int idx=threadIdx.x; idx < stateSize; idx+=blockDim.x) {
state_local[idx] = input[idx];
}
}
}; // namespace
template <>
void repeatCopy(
size_t repeat_n,
size_t n,
const float* src,
float* dst,
CUDAContext* context) {
initRecurrentInput_kernel<float><<<repeat_n, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(
n, src, dst);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template <>
void repeatCopy(
size_t repeat_n,
size_t n,
const at::Half* src,
at::Half* dst,
CUDAContext* context) {
initRecurrentInput_kernel<at::Half><<<repeat_n, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(
n, src, dst);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}; // namespace detail
template <>
bool RecurrentNetworkOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(0));
}
template <>
bool RecurrentNetworkGradientOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(0));
}
template <>
bool AccumulateInputGradientOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(1));
}
template <>
bool RNNApplyLinkOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(1));
}
REGISTER_CUDA_OPERATOR(
RecurrentNetwork,
RecurrentNetworkOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(
RecurrentNetworkGradient,
RecurrentNetworkGradientOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(
rnn_internal_accumulate_gradient_input,
AccumulateInputGradientOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(
rnn_internal_apply_link,
RNNApplyLinkOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/queue/queue_ops.h"
#include "caffe2/utils/math.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(CreateBlobsQueue, CreateBlobsQueueOp<HIPContext>);
REGISTER_HIP_OPERATOR(EnqueueBlobs, EnqueueBlobsOp<HIPContext>);
REGISTER_HIP_OPERATOR(DequeueBlobs, DequeueBlobsOp<HIPContext>);
REGISTER_HIP_OPERATOR(CloseBlobsQueue, CloseBlobsQueueOp<HIPContext>);
REGISTER_HIP_OPERATOR(SafeEnqueueBlobs, SafeEnqueueBlobsOp<HIPContext>);
REGISTER_HIP_OPERATOR(SafeDequeueBlobs, SafeDequeueBlobsOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/queue/queue_ops.h"
#include "caffe2/utils/math.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(CreateBlobsQueue, CreateBlobsQueueOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(EnqueueBlobs, EnqueueBlobsOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(DequeueBlobs, DequeueBlobsOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(CloseBlobsQueue, CloseBlobsQueueOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(SafeEnqueueBlobs, SafeEnqueueBlobsOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(SafeDequeueBlobs, SafeDequeueBlobsOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/sgd/iter_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(Iter, IterOp<HIPContext>);
REGISTER_HIP_OPERATOR(AtomicIter, AtomicIterOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/sgd/iter_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(Iter, IterOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(AtomicIter, AtomicIterOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/sgd/lars_op.h"
namespace caffe2 {
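// Single-thread kernel: computes the LARS trust ratio
// (*trust) / (||dX|| / ||X|| + wd + offset), falls back to 1 when ||X|| == 0,
// and clamps the result to [lr_min, *lr_max].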
__global__ void ComputeLearningRateKernel(
const float* wd,
const float* trust,
const float* lr_max,
float offset,
float lr_min,
float* X_norm,
float* dX_norm,
float* lr_rescaled) {
float val = 1.0;
if (*X_norm > 0) {
val = (*trust) / (*dX_norm / *X_norm + (*wd) + offset);
}
*lr_rescaled = fmaxf(fminf(val, *lr_max), lr_min);
}
template <>
void LarsOp<float, HIPContext>::ComputeLearningRate(
const float* wd,
const float* trust,
const float* lr_max,
float offset,
float lr_min,
float* X_norm,
float* dX_norm,
float* lr_rescaled) {
hipLaunchKernelGGL(( ComputeLearningRateKernel), dim3(1), dim3(1), 0, context_.hip_stream(),
wd, trust, lr_max, offset, lr_min, X_norm, dX_norm, lr_rescaled);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
REGISTER_HIP_OPERATOR(Lars, LarsOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/sgd/lars_op.h"
namespace caffe2 {
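// Single-thread kernel: computes the LARS trust ratio
// (*trust) / (||dX|| / ||X|| + wd + offset), falls back to 1 when ||X|| == 0,
// and clamps the result to [lr_min, *lr_max].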
__global__ void ComputeLearningRateKernel(
const float* wd,
const float* trust,
const float* lr_max,
float offset,
float lr_min,
float* X_norm,
float* dX_norm,
float* lr_rescaled) {
float val = 1.0;
if (*X_norm > 0) {
val = (*trust) / (*dX_norm / *X_norm + (*wd) + offset);
}
*lr_rescaled = fmaxf(fminf(val, *lr_max), lr_min);
}
template <>
void LarsOp<float, CUDAContext>::ComputeLearningRate(
const float* wd,
const float* trust,
const float* lr_max,
float offset,
float lr_min,
float* X_norm,
float* dX_norm,
float* lr_rescaled) {
ComputeLearningRateKernel<<<1, 1, 0, context_.cuda_stream()>>>(
wd, trust, lr_max, offset, lr_min, X_norm, dX_norm, lr_rescaled);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
REGISTER_CUDA_OPERATOR(Lars, LarsOp<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/sgd/learning_rate_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(LearningRate, LearningRateOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/sgd/learning_rate_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(LearningRate, LearningRateOp<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/sgd/rmsprop_op.h"
#include "caffe2/core/hip/common_gpu.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
__global__ void RmsPropUpdate(
int N,
const float* g,
const float* ms,
const float* mom,
float* ng,
float* nms,
float* nmom,
float decay,
float momentum,
float epsilon,
const float* lr) {
HIP_1D_KERNEL_LOOP(i, N) {
// Update new mean square estimate
nms[i] = ms[i] + (1.0f - decay) * (g[i] * g[i] - ms[i]);
// Update momentum estimate
nmom[i] =
mom[i] * momentum + lr[0] * g[i] / sqrtf(epsilon + nms[i]);
// New gradient is the momentum
ng[i] = nmom[i];
}
}
template <>
void rmsprop_update<HIPContext>(
int N,
const float* g,
const float* ms,
const float* mom,
float* ng,
float* nms,
float* nmom,
float decay,
float momentum,
float epsilon,
const float* lr,
HIPContext* context) {
hipLaunchKernelGGL(( RmsPropUpdate), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_HIP_NUM_THREADS), 0, context->hip_stream(),
N, g, ms, mom, ng, nms, nmom, decay, momentum, epsilon, lr);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
REGISTER_HIP_OPERATOR(RmsProp, RmsPropOp<float, HIPContext>);
}
### |
#include "caffe2/sgd/rmsprop_op.h"
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
__global__ void RmsPropUpdate(
int N,
const float* g,
const float* ms,
const float* mom,
float* ng,
float* nms,
float* nmom,
float decay,
float momentum,
float epsilon,
const float* lr) {
CUDA_1D_KERNEL_LOOP(i, N) {
// Update new mean square estimate
nms[i] = ms[i] + (1.0f - decay) * (g[i] * g[i] - ms[i]);
// Update momentum estimate
nmom[i] =
mom[i] * momentum + lr[0] * g[i] / sqrtf(epsilon + nms[i]);
// New gradient is the momentum
ng[i] = nmom[i];
}
}
template <>
void rmsprop_update<CUDAContext>(
int N,
const float* g,
const float* ms,
const float* mom,
float* ng,
float* nms,
float* nmom,
float decay,
float momentum,
float epsilon,
const float* lr,
CUDAContext* context) {
RmsPropUpdate<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(
N, g, ms, mom, ng, nms, nmom, decay, momentum, epsilon, lr);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
REGISTER_CUDA_OPERATOR(RmsProp, RmsPropOp<float, CUDAContext>);
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/common_gpu.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/sgd/weight_scale_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(WeightScale, WeightScaleOp<HIPContext>);
template <typename T>
void weight_scale_update_kernel(
int N,
const T* w,
const T& scale,
int64_t iter,
int64_t stepsize,
int64_t update_upper_bound,
T* nw,
HIPContext* context) {
const auto w_size = N * sizeof(float);
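// Off-step iterations (or iterations past update_upper_bound) copy the
// weights through unchanged; scheduled step iterations scale them by `scale`.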
if (iter % stepsize != 0 || iter >= update_upper_bound) {
(void)hipMemcpy(nw, w, w_size, hipMemcpyDefault);
} else {
// perform the weight scaling
caffe2::math::Scale<T, T, HIPContext>(N, scale, w, nw, context);
}
}
template <>
template <typename T>
bool WeightScaleOp<HIPContext>::DoRunWithType() {
const auto iter =
OperatorBase::Input<Tensor>(ITER, CPU).template data<int64_t>()[0] + 1;
weight_scale_update_kernel<T>(
Input(WEIGHTS).size(),
Input(WEIGHTS).template data<T>(),
scale_,
iter,
stepsize_,
update_upper_bound_,
Output(OUTPUT_WEIGHTS)->template mutable_data<T>(),
&context_);
return true;
}
} // namespace caffe2
### |
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/sgd/weight_scale_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(WeightScale, WeightScaleOp<CUDAContext>);
template <typename T>
void weight_scale_update_kernel(
int N,
const T* w,
const T& scale,
int64_t iter,
int64_t stepsize,
int64_t update_upper_bound,
T* nw,
CUDAContext* context) {
const auto w_size = N * sizeof(float);
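// Off-step iterations (or iterations past update_upper_bound) copy the
// weights through unchanged; scheduled step iterations scale them by `scale`.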
if (iter % stepsize != 0 || iter >= update_upper_bound) {
(void)cudaMemcpy(nw, w, w_size, cudaMemcpyDefault);
} else {
// perform the weight scaling
caffe2::math::Scale<T, T, CUDAContext>(N, scale, w, nw, context);
}
}
template <>
template <typename T>
bool WeightScaleOp<CUDAContext>::DoRunWithType() {
const auto iter =
OperatorBase::Input<Tensor>(ITER, CPU).template data<int64_t>()[0] + 1;
weight_scale_update_kernel<T>(
Input(WEIGHTS).size(),
Input(WEIGHTS).template data<T>(),
scale_,
iter,
stepsize_,
update_upper_bound_,
Output(OUTPUT_WEIGHTS)->template mutable_data<T>(),
&context_);
return true;
}
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#pragma once
// cub sort support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in:
// https://github.com/NVIDIA/cub/pull/326
// CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake
// starting from CUDA 11.5
#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE)
#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true
#else
#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false
#endif
#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE()
namespace caffe2 {
namespace cub = ::CUB_WRAPPED_NAMESPACE::cub;
}
#endif
### |
#pragma once
// cub sort support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in:
// https://github.com/NVIDIA/cub/pull/326
// CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake
// starting from CUDA 11.5
#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE)
#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true
#else
#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false
#endif
#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE()
namespace caffe2 {
namespace cub = ::CUB_WRAPPED_NAMESPACE::cub;
}
#endif
### |
// !!! This is a file automatically generated by hipify!!!
#ifndef CAFFE2_UTILS_GPU_ATOMICS_H_
#define CAFFE2_UTILS_GPU_ATOMICS_H_
#include <hip/hip_runtime.h>
namespace caffe2 {
namespace {
template <typename T>
inline __device__ void gpu_atomic_add(T* address, const T val) {
atomicAdd(address, val);
}
template <>
inline __device__ void gpu_atomic_add(float* address, const float val) {
#if defined(USE_ROCM) && defined(__gfx908__)
atomicAddNoRet(address, val);
#else
atomicAdd(address, val);
#endif
}
} // namespace
} // namespace caffe2
#endif // CAFFE2_UTILS_GPU_ATOMICS_H_
### |
#ifndef CAFFE2_UTILS_GPU_ATOMICS_H_
#define CAFFE2_UTILS_GPU_ATOMICS_H_
#include <cuda_runtime.h>
namespace caffe2 {
namespace {
template <typename T>
inline __device__ void gpu_atomic_add(T* address, const T val) {
atomicAdd(address, val);
}
template <>
inline __device__ void gpu_atomic_add(float* address, const float val) {
#if defined(USE_ROCM) && defined(__gfx908__)
atomicAddNoRet(address, val);
#else
atomicAdd(address, val);
#endif
}
} // namespace
} // namespace caffe2
#endif // CAFFE2_UTILS_GPU_ATOMICS_H_
### |
#include "hip/hip_runtime.h"
#ifndef CAFFE2_UTILS_GPU_SCAN_UTILS_H_
#define CAFFE2_UTILS_GPU_SCAN_UTILS_H_
#include "caffe2/utils/hip/GpuDefs.cuh"
namespace caffe2 {
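// Hillis-Steele style inclusive scan over a single thread block: every thread
// contributes one element in shared memory and the stride doubles each round
// until each prefix covers the whole block.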
template <typename T, bool KillWARDependency, class BinaryFunction>
__device__ void inclusivePrefixScan(T* smem, T in, T* out, BinaryFunction binop) {
smem[threadIdx.x] = in;
__syncthreads();
for (int offset = 1; offset < blockDim.x; offset *= 2) {
T val = 0;
if (threadIdx.x >= offset) {
val = binop(smem[threadIdx.x - offset], smem[threadIdx.x]);
}
__syncthreads();
if (threadIdx.x >= offset) {
smem[threadIdx.x] = val;
}
__syncthreads();
}
*out = smem[threadIdx.x];
if (KillWARDependency) {
__syncthreads();
}
}
template <typename T, bool KillWARDependency, class BinaryFunction>
__device__ void exclusivePrefixScan(T* smem, T in, T* out, T* carry, BinaryFunction binop) {
inclusivePrefixScan<T, false, BinaryFunction>(smem, in, out, binop);
*out -= in;
*carry = smem[blockDim.x - 1];
if (KillWARDependency) {
__syncthreads();
}
}
template <typename T, bool KillWARDependency, class BinaryFunction>
__device__ void inclusiveBinaryPrefixScan(T* smem, bool in, T* out, BinaryFunction binop) {
#if defined(USE_ROCM)
unsigned long long int vote = __ballot(in);
T index = __popcll(getLaneMaskLe() & vote);
T carry = __popcll(vote);
#else
T vote = __ballot_sync(__activemask(), in);
T index = __popc(getLaneMaskLe() & vote);
T carry = __popc(vote);
#endif
int warp = threadIdx.x / kWarpSize;
if (getLaneId() == 0) {
smem[warp] = carry;
}
__syncthreads();
if (threadIdx.x == 0) {
int current = 0;
for (int i = 0; i < blockDim.x / kWarpSize; ++i) {
T v = smem[i];
smem[i] = binop(smem[i], current);
current = binop(current, v);
}
}
__syncthreads();
if (warp >= 1) {
index = binop(index, smem[warp - 1]);
}
*out = index;
if (KillWARDependency) {
__syncthreads();
}
}
template <typename T, bool KillWARDependency, class BinaryFunction>
__device__ void exclusiveBinaryPrefixScan(T* smem, bool in, T* out, T* carry, BinaryFunction binop) {
inclusiveBinaryPrefixScan<T, false, BinaryFunction>(smem, in, out, binop);
*out -= (T) in;
#if defined(USE_ROCM)
*carry = smem[math::DivUp<int>(blockDim.x, kWarpSize) - 1];
#else
*carry = smem[(blockDim.x / kWarpSize) - 1];
#endif
if (KillWARDependency) {
__syncthreads();
}
}
}
#endif
### |
#ifndef CAFFE2_UTILS_GPU_SCAN_UTILS_H_
#define CAFFE2_UTILS_GPU_SCAN_UTILS_H_
#include "caffe2/utils/GpuDefs.cuh"
namespace caffe2 {
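// Hillis-Steele style inclusive scan over a single thread block: every thread
// contributes one element in shared memory and the stride doubles each round
// until each prefix covers the whole block.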
template <typename T, bool KillWARDependency, class BinaryFunction>
__device__ void inclusivePrefixScan(T* smem, T in, T* out, BinaryFunction binop) {
smem[threadIdx.x] = in;
__syncthreads();
for (int offset = 1; offset < blockDim.x; offset *= 2) {
T val = 0;
if (threadIdx.x >= offset) {
val = binop(smem[threadIdx.x - offset], smem[threadIdx.x]);
}
__syncthreads();
if (threadIdx.x >= offset) {
smem[threadIdx.x] = val;
}
__syncthreads();
}
*out = smem[threadIdx.x];
if (KillWARDependency) {
__syncthreads();
}
}
template <typename T, bool KillWARDependency, class BinaryFunction>
__device__ void exclusivePrefixScan(T* smem, T in, T* out, T* carry, BinaryFunction binop) {
inclusivePrefixScan<T, false, BinaryFunction>(smem, in, out, binop);
*out -= in;
*carry = smem[blockDim.x - 1];
if (KillWARDependency) {
__syncthreads();
}
}
template <typename T, bool KillWARDependency, class BinaryFunction>
__device__ void inclusiveBinaryPrefixScan(T* smem, bool in, T* out, BinaryFunction binop) {
#if defined(USE_ROCM)
unsigned long long int vote = __ballot(in);
T index = __popcll(getLaneMaskLe() & vote);
T carry = __popcll(vote);
#else
T vote = __ballot_sync(__activemask(), in);
T index = __popc(getLaneMaskLe() & vote);
T carry = __popc(vote);
#endif
int warp = threadIdx.x / kWarpSize;
if (getLaneId() == 0) {
smem[warp] = carry;
}
__syncthreads();
if (threadIdx.x == 0) {
int current = 0;
for (int i = 0; i < blockDim.x / kWarpSize; ++i) {
T v = smem[i];
smem[i] = binop(smem[i], current);
current = binop(current, v);
}
}
__syncthreads();
if (warp >= 1) {
index = binop(index, smem[warp - 1]);
}
*out = index;
if (KillWARDependency) {
__syncthreads();
}
}
template <typename T, bool KillWARDependency, class BinaryFunction>
__device__ void exclusiveBinaryPrefixScan(T* smem, bool in, T* out, T* carry, BinaryFunction binop) {
inclusiveBinaryPrefixScan<T, false, BinaryFunction>(smem, in, out, binop);
*out -= (T) in;
#if defined(USE_ROCM)
*carry = smem[math::DivUp<int>(blockDim.x, kWarpSize) - 1];
#else
*carry = smem[(blockDim.x / kWarpSize) - 1];
#endif
if (KillWARDependency) {
__syncthreads();
}
}
}
#endif
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char airy_ai_name[] = "airy_ai_forward";
void airy_ai_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "airy_ai_hip", [&]() {
jitted_gpu_kernel<airy_ai_name, scalar_t, scalar_t, 1>(iterator, airy_ai_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "airy_ai_hip", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return airy_ai_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
} // anonymous namespace
REGISTER_DISPATCH(special_airy_ai_stub, &airy_ai_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char airy_ai_name[] = "airy_ai_forward";
void airy_ai_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "airy_ai_cuda", [&]() {
jitted_gpu_kernel<airy_ai_name, scalar_t, scalar_t, 1>(iterator, airy_ai_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "airy_ai_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return airy_ai_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
} // anonymous namespace
REGISTER_DISPATCH(special_airy_ai_stub, &airy_ai_kernel_cuda);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/hip/HIPConfig.h>
#include <ATen/hip/cub.cuh>
namespace at {
namespace hip {
namespace cub {
namespace detail {
template <typename key_t, int value_size>
void radix_sort_pairs_impl(
const key_t* keys_in, key_t* keys_out, const OpaqueType<value_size>* values_in, OpaqueType<value_size>* values_out, int64_t n, bool descending, int64_t begin_bit, int64_t end_bit) {
TORCH_CHECK(
n <= std::numeric_limits<int>::max(), "cub sort does not support sorting more than INT_MAX elements");
using key_t_ = typename detail::hip_type<key_t>::type;
auto allocator = c10::hip::HIPCachingAllocator::get();
c10::DataPtr keys_out_owner;
if (keys_out == nullptr) {
keys_out_owner = allocator->allocate(n * sizeof(key_t));
keys_out = reinterpret_cast<key_t*>(keys_out_owner.get());
}
const key_t_* keys_in_ = reinterpret_cast<const key_t_*>(keys_in);
key_t_* keys_out_ = reinterpret_cast<key_t_*>(keys_out);
if (descending) {
CUB_WRAPPER(
NO_ROCM(at_hip_detail)::hipcub::DeviceRadixSort::SortPairsDescending, keys_in_, keys_out_, values_in, values_out, n, begin_bit, end_bit, c10::hip::getCurrentHIPStream());
} else {
CUB_WRAPPER(
NO_ROCM(at_hip_detail)::hipcub::DeviceRadixSort::SortPairs, keys_in_, keys_out_, values_in, values_out, n, begin_bit, end_bit, c10::hip::getCurrentHIPStream());
}
}
#define AT_INSTANTIATE_SORT_PAIRS(key_t, value_size) template void radix_sort_pairs_impl( const key_t* keys_in, key_t* keys_out, const OpaqueType<value_size>* values_in, OpaqueType<value_size>* values_out, int64_t n, bool descending, int64_t begin_bit, int64_t end_bit);
AT_INSTANTIATE_SORT_PAIRS(int32_t, 1)
AT_INSTANTIATE_SORT_PAIRS(int32_t, 2)
AT_INSTANTIATE_SORT_PAIRS(int32_t, 4)
AT_INSTANTIATE_SORT_PAIRS(int64_t, 1)
AT_INSTANTIATE_SORT_PAIRS(int64_t, 2)
AT_INSTANTIATE_SORT_PAIRS(int64_t, 4)
#define AT_INSTANTIATE_SORT_PAIRS_8(scalar_t, ScalarType) AT_INSTANTIATE_SORT_PAIRS(scalar_t, 8)
AT_FORALL_SCALAR_TYPES_AND2(Bool, Half, AT_INSTANTIATE_SORT_PAIRS_8)
#if !AT_ROCM_ENABLED() || (AT_ROCM_ENABLED() && ROCM_VERSION >= 40500)
AT_INSTANTIATE_SORT_PAIRS(c10::BFloat16, 8)
#endif
}
}
}
}
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/cuda/CUDAConfig.h>
#include <ATen/cuda/cub.cuh>
namespace at {
namespace cuda {
namespace cub {
namespace detail {
template <typename key_t, int value_size>
void radix_sort_pairs_impl(
const key_t* keys_in, key_t* keys_out, const OpaqueType<value_size>* values_in, OpaqueType<value_size>* values_out, int64_t n, bool descending, int64_t begin_bit, int64_t end_bit) {
TORCH_CHECK(
n <= std::numeric_limits<int>::max(), "cub sort does not support sorting more than INT_MAX elements");
using key_t_ = typename detail::cuda_type<key_t>::type;
auto allocator = c10::cuda::CUDACachingAllocator::get();
c10::DataPtr keys_out_owner;
if (keys_out == nullptr) {
keys_out_owner = allocator->allocate(n * sizeof(key_t));
keys_out = reinterpret_cast<key_t*>(keys_out_owner.get());
}
const key_t_* keys_in_ = reinterpret_cast<const key_t_*>(keys_in);
key_t_* keys_out_ = reinterpret_cast<key_t_*>(keys_out);
if (descending) {
CUB_WRAPPER(
NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortPairsDescending, keys_in_, keys_out_, values_in, values_out, n, begin_bit, end_bit, c10::cuda::getCurrentCUDAStream());
} else {
CUB_WRAPPER(
NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortPairs, keys_in_, keys_out_, values_in, values_out, n, begin_bit, end_bit, c10::cuda::getCurrentCUDAStream());
}
}
#define AT_INSTANTIATE_SORT_PAIRS(key_t, value_size) template void radix_sort_pairs_impl( const key_t* keys_in, key_t* keys_out, const OpaqueType<value_size>* values_in, OpaqueType<value_size>* values_out, int64_t n, bool descending, int64_t begin_bit, int64_t end_bit);
AT_INSTANTIATE_SORT_PAIRS(int32_t, 1)
AT_INSTANTIATE_SORT_PAIRS(int32_t, 2)
AT_INSTANTIATE_SORT_PAIRS(int32_t, 4)
AT_INSTANTIATE_SORT_PAIRS(int64_t, 1)
AT_INSTANTIATE_SORT_PAIRS(int64_t, 2)
AT_INSTANTIATE_SORT_PAIRS(int64_t, 4)
#define AT_INSTANTIATE_SORT_PAIRS_8(scalar_t, ScalarType) AT_INSTANTIATE_SORT_PAIRS(scalar_t, 8)
AT_FORALL_SCALAR_TYPES_AND2(Bool, Half, AT_INSTANTIATE_SORT_PAIRS_8)
#if !AT_ROCM_ENABLED() || (AT_ROCM_ENABLED() && ROCM_VERSION >= 40500)
AT_INSTANTIATE_SORT_PAIRS(c10::BFloat16, 8)
#endif
}
}
}
}
### |
#include "hip/hip_runtime.h"
#ifndef CAFFE2_UTILS_MATH_REDUCE_CUH_
#define CAFFE2_UTILS_MATH_REDUCE_CUH_
#include "caffe2/utils/cub_namespace.cuh"
#include <hipcub/hipcub.hpp>
#include "caffe2/core/hip/common_gpu.h"
namespace caffe2 {
template <typename T>
using BlockReduce = hipcub::BlockReduce<T, CAFFE_HIP_NUM_THREADS>;
template <typename T, int kBlockDimX, int kBlockDimY>
using BlockReduce2D = hipcub::
BlockReduce<T, kBlockDimX, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS, kBlockDimY>;
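// Dispatch a 2D thread-block shape based on `size` (the reduced dimension):
// larger sizes put more threads along blockDim.y for the per-row reduction,
// smaller sizes trade them for more rows per block along blockDim.x.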
#define DISPATCH_REDUCE_KERNEL_BY_2D_BLOCK_WITH_TYPE_1( size, Func, T, grid_dim, hip_stream, ...) do { if (size >= 128) { hipLaunchKernelGGL(( Func<T, 1, 128>) , dim3(grid_dim), dim3(dim3(1, 128)), 0, hip_stream, __VA_ARGS__); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (size >= 64) { hipLaunchKernelGGL(( Func<T, 2, 64>), dim3(grid_dim), dim3(dim3(2, 64)), 0, hip_stream, __VA_ARGS__); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (size >= 32) { hipLaunchKernelGGL(( Func<T, 4, 32>), dim3(grid_dim), dim3(dim3(4, 32)), 0, hip_stream, __VA_ARGS__); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( Func<T, 8, 16>), dim3(grid_dim), dim3(dim3(8, 16)), 0, hip_stream, __VA_ARGS__); C10_HIP_KERNEL_LAUNCH_CHECK(); } } while (false)
#define DISPATCH_REDUCE_KERNEL_BY_2D_BLOCK_WITH_TYPE_2( size, Func, T1, T2, grid_dim, hip_stream, ...) do { if (size >= 128) { hipLaunchKernelGGL(( Func<T1, T2, 1, 128>) , dim3(grid_dim), dim3(dim3(1, 128)), 0, hip_stream, __VA_ARGS__); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (size >= 64) { hipLaunchKernelGGL(( Func<T1, T2, 2, 64>) , dim3(grid_dim), dim3(dim3(2, 64)), 0, hip_stream, __VA_ARGS__); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (size >= 32) { hipLaunchKernelGGL(( Func<T1, T2, 4, 32>) , dim3(grid_dim), dim3(dim3(4, 32)), 0, hip_stream, __VA_ARGS__); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( Func<T1, T2, 8, 16>) , dim3(grid_dim), dim3(dim3(8, 16)), 0, hip_stream, __VA_ARGS__); C10_HIP_KERNEL_LAUNCH_CHECK(); } } while (false)
}
#endif
### |
#ifndef CAFFE2_UTILS_MATH_REDUCE_CUH_
#define CAFFE2_UTILS_MATH_REDUCE_CUH_
#include "caffe2/utils/cub_namespace.cuh"
#include <cub/block/block_reduce.cuh>
#include "caffe2/core/common_gpu.h"
namespace caffe2 {
template <typename T>
using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
template <typename T, int kBlockDimX, int kBlockDimY>
using BlockReduce2D = cub::
BlockReduce<T, kBlockDimX, cub::BLOCK_REDUCE_WARP_REDUCTIONS, kBlockDimY>;
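// Dispatch a 2D thread-block shape based on `size` (the reduced dimension):
// larger sizes put more threads along blockDim.y for the per-row reduction,
// smaller sizes trade them for more rows per block along blockDim.x.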
#define DISPATCH_REDUCE_KERNEL_BY_2D_BLOCK_WITH_TYPE_1( size, Func, T, grid_dim, cuda_stream, ...) do { if (size >= 128) { Func<T, 1, 128> <<<grid_dim, dim3(1, 128), 0, cuda_stream>>>(__VA_ARGS__); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (size >= 64) { Func<T, 2, 64><<<grid_dim, dim3(2, 64), 0, cuda_stream>>>(__VA_ARGS__); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (size >= 32) { Func<T, 4, 32><<<grid_dim, dim3(4, 32), 0, cuda_stream>>>(__VA_ARGS__); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { Func<T, 8, 16><<<grid_dim, dim3(8, 16), 0, cuda_stream>>>(__VA_ARGS__); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } while (false)
#define DISPATCH_REDUCE_KERNEL_BY_2D_BLOCK_WITH_TYPE_2( size, Func, T1, T2, grid_dim, cuda_stream, ...) do { if (size >= 128) { Func<T1, T2, 1, 128> <<<grid_dim, dim3(1, 128), 0, cuda_stream>>>(__VA_ARGS__); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (size >= 64) { Func<T1, T2, 2, 64> <<<grid_dim, dim3(2, 64), 0, cuda_stream>>>(__VA_ARGS__); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (size >= 32) { Func<T1, T2, 4, 32> <<<grid_dim, dim3(4, 32), 0, cuda_stream>>>(__VA_ARGS__); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { Func<T1, T2, 8, 16> <<<grid_dim, dim3(8, 16), 0, cuda_stream>>>(__VA_ARGS__); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } while (false)
}
#endif
### |
// !!! This is a file automatically generated by hipify!!!
#include <caffe2/core/hip/common_gpu.h>
#include <caffe2/core/hip/context_gpu.h>
#include <caffe2/video/video_input_op.h>
namespace caffe2 {
REGISTER_HIP_OPERATOR(VideoInput, VideoInputOp<HIPContext>);
} // namespace caffe2
### |
#include <caffe2/core/common_gpu.h>
#include <caffe2/core/context_gpu.h>
#include <caffe2/video/video_input_op.h>
namespace caffe2 {
REGISTER_CUDA_OPERATOR(VideoInput, VideoInputOp<CUDAContext>);
} // namespace caffe2
### |
#include <cfloat>
#include "caffe2/core/hip/context_gpu.h"
#include "modules/detectron/sample_as_op.h"
#include <stdio.h>
namespace caffe2 {
template <>
bool SampleAsOp<float, HIPContext>::RunOnDevice() {
auto& X = Input(0);
auto& L = Input(1);
CAFFE_ENFORCE(
X.dim32(0) == L.dim32(0), "X.dim32(0) must be equal to L.dim32(0)", "(", X.dim32(0), " vs. ", L.dim32(0), ")");
std::vector<int> labels(L.dim32(0));
context_.CopyBytes<HIPContext, CPUContext>(
L.dim32(0) * sizeof(int), L.data<int>(), &labels[0]);
context_.FinishDeviceComputation();
int count = 0;
for (int i = 0; i < L.dim32(0); i++) {
if (labels[i] > 0) {
count++;
}
}
assert(count > 0);
vector<int64_t> out_shape(X.sizes().vec());
out_shape[0] = count;
auto* Y = Output(0, out_shape, at::dtype<float>());
const int len = X.size() / X.dim32(0);
float* output = Y->mutable_data<float>();
for (int i = 0; i < L.dim32(0); i++) {
if (labels[i] > 0) {
context_.CopyBytes<HIPContext, HIPContext>(
len * sizeof(float), X.data<float>() + i * len, output);
output += len;
}
}
return true;
}
template <>
bool SampleAsGradientOp<float, HIPContext>::RunOnDevice() {
auto& X = Input(0);
auto& L = Input(1);
auto& dY = Input(2);
auto* dX = Output(0, X.sizes(), at::dtype<float>());
std::vector<int> labels(L.dim32(0));
context_.CopyBytes<HIPContext, CPUContext>(
L.dim32(0) * sizeof(int), L.data<int>(), &labels[0]);
context_.FinishDeviceComputation();
math::Set<float, HIPContext>(
dX->size(), 0.f, dX->mutable_data<float>(), &context_);
const int len = X.size() / X.dim32(0);
const float* input = dY.data<float>();
for (int i = 0; i < L.dim32(0); i++) {
if (labels[i] > 0) {
context_.CopyBytes<HIPContext, HIPContext>(
len * sizeof(float), input, dX->mutable_data<float>() + i * len);
input += len;
}
}
return true;
}
REGISTER_HIP_OPERATOR(SampleAs, SampleAsOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(
SampleAsGradient, SampleAsGradientOp<float, HIPContext>);
}
### |
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "modules/detectron/sample_as_op.h"
#include <stdio.h>
namespace caffe2 {
template <>
bool SampleAsOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& L = Input(1);
CAFFE_ENFORCE(
X.dim32(0) == L.dim32(0), "X.dim32(0) must be equal to L.dim32(0)", "(", X.dim32(0), " vs. ", L.dim32(0), ")");
std::vector<int> labels(L.dim32(0));
context_.CopyBytes<CUDAContext, CPUContext>(
L.dim32(0) * sizeof(int), L.data<int>(), &labels[0]);
context_.FinishDeviceComputation();
int count = 0;
for (int i = 0; i < L.dim32(0); i++) {
if (labels[i] > 0) {
count++;
}
}
assert(count > 0);
vector<int64_t> out_shape(X.sizes().vec());
out_shape[0] = count;
auto* Y = Output(0, out_shape, at::dtype<float>());
const int len = X.size() / X.dim32(0);
float* output = Y->mutable_data<float>();
for (int i = 0; i < L.dim32(0); i++) {
if (labels[i] > 0) {
context_.CopyBytes<CUDAContext, CUDAContext>(
len * sizeof(float), X.data<float>() + i * len, output);
output += len;
}
}
return true;
}
template <>
bool SampleAsGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& L = Input(1);
auto& dY = Input(2);
auto* dX = Output(0, X.sizes(), at::dtype<float>());
std::vector<int> labels(L.dim32(0));
context_.CopyBytes<CUDAContext, CPUContext>(
L.dim32(0) * sizeof(int), L.data<int>(), &labels[0]);
context_.FinishDeviceComputation();
math::Set<float, CUDAContext>(
dX->size(), 0.f, dX->mutable_data<float>(), &context_);
const int len = X.size() / X.dim32(0);
const float* input = dY.data<float>();
for (int i = 0; i < L.dim32(0); i++) {
if (labels[i] > 0) {
context_.CopyBytes<CUDAContext, CUDAContext>(
len * sizeof(float), input, dX->mutable_data<float>() + i * len);
input += len;
}
}
return true;
}
REGISTER_CUDA_OPERATOR(SampleAs, SampleAsOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SampleAsGradient, SampleAsGradientOp<float, CUDAContext>);
}
### |
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <ATen/ATen.h>
#include <vector>
namespace torch {
namespace distributed {
namespace c10d {
namespace quantization {
at::Tensor _float_to_bfloat16_hip(const at::Tensor& input);
at::Tensor _bfloat16_to_float_hip(const at::Tensor& input);
} // namespace quantization
} // namespace c10d
} // namespace distributed
} // namespace torch
### |
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <ATen/ATen.h>
#include <vector>
namespace torch {
namespace distributed {
namespace c10d {
namespace quantization {
at::Tensor _float_to_bfloat16_cuda(const at::Tensor& input);
at::Tensor _bfloat16_to_float_cuda(const at::Tensor& input);
} // namespace quantization
} // namespace c10d
} // namespace distributed
} // namespace torch
### |
// !!! This is a file automatically generated by hipify!!!
#define __NVFUSER_BFLOAT_TO_US(var) *(reinterpret_cast<unsigned short*>(&(var)))
#define __NVFUSER_BFLOAT_TO_CUS(var) \
*(reinterpret_cast<const unsigned short*>(&(var)))
struct __bfloat;
__device__ __bfloat __float2bfloat(const float);
struct __align__(2) __bfloat {
__bfloat() = default;
__device__ __bfloat(const float f) {
__x = __float2bfloat(f).__x;
}
protected:
unsigned short __x;
};
__device__ __bfloat __float2bfloat(const float f) {
__bfloat val;
asm("{ cvt.rn.bf16.f32 %0, %1;}\n"
: "=h"(__NVFUSER_BFLOAT_TO_US(val))
: "f"(f));
return val;
}
__device__ float __bfloat2float(const __bfloat h) {
float val;
asm("{ mov.b32 %0, {0,%1};}\n"
: "=f"(val)
: "h"(__NVFUSER_BFLOAT_TO_CUS(h)));
return val;
}
### |
#define __NVFUSER_BFLOAT_TO_US(var) *(reinterpret_cast<unsigned short*>(&(var)))
#define __NVFUSER_BFLOAT_TO_CUS(var) \
*(reinterpret_cast<const unsigned short*>(&(var)))
struct __bfloat;
__device__ __bfloat __float2bfloat(const float);
struct __align__(2) __bfloat {
__bfloat() = default;
__device__ __bfloat(const float f) {
__x = __float2bfloat(f).__x;
}
protected:
unsigned short __x;
};
__device__ __bfloat __float2bfloat(const float f) {
__bfloat val;
asm("{ cvt.rn.bf16.f32 %0, %1;}\n"
: "=h"(__NVFUSER_BFLOAT_TO_US(val))
: "f"(f));
return val;
}
__device__ float __bfloat2float(const __bfloat h) {
float val;
asm("{ mov.b32 %0, {0,%1};}\n"
: "=f"(val)
: "h"(__NVFUSER_BFLOAT_TO_CUS(h)));
return val;
}
### |
// !!! This is a file automatically generated by hipify!!!
struct __align__(2) __bfloat {
__bfloat() = default;
inline __device__ __bfloat(const float f) {
if (f != f) {
__x = uint16_t(0x7FC0);
} else {
union {
uint32_t U32;
float F32;
};
F32 = f;
uint32_t rounding_bias = ((U32 >> 16) & 1) + uint32_t(0x7FFF);
__x = static_cast<uint16_t>((U32 + rounding_bias) >> 16);
}
}
inline __device__ operator float() const {
float res = 0;
uint32_t tmp = __x;
tmp <<= 16;
float* tempRes = reinterpret_cast<float*>(&tmp);
res = *tempRes;
return res;
}
protected:
unsigned short __x;
};
__device__ __bfloat __float2bfloat(const float f) {
return __bfloat(f);
}
__device__ float __bfloat2float(const __bfloat h) {
return float(h);
}
### |
struct __align__(2) __bfloat {
__bfloat() = default;
inline __device__ __bfloat(const float f) {
if (f != f) {
__x = uint16_t(0x7FC0);
} else {
union {
uint32_t U32;
float F32;
};
F32 = f;
uint32_t rounding_bias = ((U32 >> 16) & 1) + uint32_t(0x7FFF);
__x = static_cast<uint16_t>((U32 + rounding_bias) >> 16);
}
}
inline __device__ operator float() const {
float res = 0;
uint32_t tmp = __x;
tmp <<= 16;
float* tempRes = reinterpret_cast<float*>(&tmp);
res = *tempRes;
return res;
}
protected:
unsigned short __x;
};
__device__ __bfloat __float2bfloat(const float f) {
return __bfloat(f);
}
__device__ float __bfloat2float(const __bfloat h) {
return float(h);
}
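// Editor's note: the block below is an illustrative addition, not part of the
// original file. The rounding_bias expression above implements
// round-to-nearest-even; a minimal host-side sketch of the same conversion,
// assuming an IEEE-754 float layout, could look like this:
#include <cstdint>
#include <cstring>
inline uint16_t float_to_bf16_host(float f) {
  if (f != f) {
    return 0x7FC0; // canonical NaN, matching the device constructor above
  }
  uint32_t u;
  std::memcpy(&u, &f, sizeof(u)); // bit-cast float -> uint32
  uint32_t rounding_bias = ((u >> 16) & 1u) + 0x7FFFu; // ties round to even
  return static_cast<uint16_t>((u + rounding_bias) >> 16); // keep top 16 bits
}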
### |
// !!! This is a file automatically generated by hipify!!!
// Default block synchronization. Just use __barrier_sync
namespace block_sync {
__forceinline__ __device__ void init() {}
// Thread-block synchronization
__forceinline__ __device__ void sync() {
__barrier_sync(0);
}
} // namespace block_sync
### |
// Default block synchronization. Just use __barrier_sync
namespace block_sync {
__forceinline__ __device__ void init() {}
// Thread-block synchronization
__forceinline__ __device__ void sync() {
__barrier_sync(0);
}
} // namespace block_sync
### |
// !!! This is a file automatically generated by hipify!!!
#define __NVFUSER_HALF_TO_US(var) *(reinterpret_cast<unsigned short*>(&(var)))
#define __NVFUSER_HALF_TO_CUS(var) \
*(reinterpret_cast<const unsigned short*>(&(var)))
struct __half;
__device__ __half __float2half(const float);
struct __align__(2) __half {
__half() = default;
__device__ __half(const float f) {
__x = __float2half(f).__x;
}
protected:
unsigned short __x;
};
__device__ __half __float2half(const float f) {
__half val;
asm("{ cvt.rn.f16.f32 %0, %1;}\n"
: "=h"(__NVFUSER_HALF_TO_US(val))
: "f"(f));
return val;
}
__device__ float __half2float(const __half h) {
float val;
asm("{ cvt.f32.f16 %0, %1;}\n" : "=f"(val) : "h"(__NVFUSER_HALF_TO_CUS(h)));
return val;
}
__device__ __half __double2half(const double d) {
#if __HIP_ARCH__ >= 700
__half val;
asm("{ cvt.rn.f16.f64 %0, %1;}\n"
: "=h"(__NVFUSER_HALF_TO_US(val))
: "d"(d));
return val;
#else
return __float2half(static_cast<float>(d));
#endif
}
__device__ double __half2double(const __half h) {
#if __HIP_ARCH__ >= 700
double val;
asm("{ cvt.f64.f16 %0, %1;}\n" : "=d"(val) : "h"(__NVFUSER_HALF_TO_CUS(h)));
return val;
#else
return static_cast<double>(__half2float(h));
#endif
}
### |
#define __NVFUSER_HALF_TO_US(var) *(reinterpret_cast<unsigned short*>(&(var)))
#define __NVFUSER_HALF_TO_CUS(var) \
*(reinterpret_cast<const unsigned short*>(&(var)))
struct __half;
__device__ __half __float2half(const float);
struct __align__(2) __half {
__half() = default;
__device__ __half(const float f) {
__x = __float2half(f).__x;
}
protected:
unsigned short __x;
};
__device__ __half __float2half(const float f) {
__half val;
asm("{ cvt.rn.f16.f32 %0, %1;}\n"
: "=h"(__NVFUSER_HALF_TO_US(val))
: "f"(f));
return val;
}
__device__ float __half2float(const __half h) {
float val;
asm("{ cvt.f32.f16 %0, %1;}\n" : "=f"(val) : "h"(__NVFUSER_HALF_TO_CUS(h)));
return val;
}
__device__ __half __double2half(const double d) {
#if __CUDA_ARCH__ >= 700
__half val;
asm("{ cvt.rn.f16.f64 %0, %1;}\n"
: "=h"(__NVFUSER_HALF_TO_US(val))
: "d"(d));
return val;
#else
return __float2half(static_cast<float>(d));
#endif
}
__device__ double __half2double(const __half h) {
#if __CUDA_ARCH__ >= 700
double val;
asm("{ cvt.f64.f16 %0, %1;}\n" : "=d"(val) : "h"(__NVFUSER_HALF_TO_CUS(h)));
return val;
#else
return static_cast<double>(__half2float(h));
#endif
}
### |
namespace fused_reduction {
template <
int NumVals, typename DataTypeT, typename IndexTypeT, template <int, typename>
typename MakeTuple>
struct WelfordTripletTuple {
static constexpr int num_vals = NumVals;
using DataType = DataTypeT;
using IndexType = IndexTypeT;
using DataTuple = typename MakeTuple<NumVals, DataType>::type;
using IndexTuple = typename MakeTuple<NumVals, IndexType>::type;
DataTuple avg;
DataTuple var;
IndexTuple N;
WelfordTripletTuple(
const DataTuple& avg, const DataTuple& var, const IndexTuple& N)
: avg(avg), var(var), N(N) {}
};
template <int NumVals, typename DataType, typename IndexType>
using LocalWelfordTripletTuple =
WelfordTripletTuple<NumVals, DataType, IndexType, MakeLocalTuple>;
template <int NumVals, typename DataType, typename IndexType>
using RefWelfordTripletTuple =
WelfordTripletTuple<NumVals, DataType, IndexType, MakeRefTuple>;
template <int NumVals, typename DataType, typename IndexType>
using ConstRefWelfordTripletTuple =
WelfordTripletTuple<NumVals, DataType, IndexType, MakeConstRefTuple>;
template <int NumVals, typename DataTypeT, typename IndexTypeT>
using VolatilePtrWelfordTripletTuple =
WelfordTripletTuple<NumVals, DataTypeT, IndexTypeT, MakeVolatilePtrTuple>;
template <typename WelfordTripletTupleType>
__inline__ __device__ static void operator+=(
WelfordTripletTupleType& triplet, nvfuser_index_t offset) {
triplet.avg += offset;
triplet.var += offset;
triplet.N += offset;
}
template <typename DstType, typename SrcType>
__inline__ __device__ static void copyWelfordTripletTuple(
DstType& dst, nvfuser_index_t dst_offset, const SrcType& src, nvfuser_index_t src_offset = 0) {
copyTuple(dst.avg, dst_offset, src.avg, src_offset);
copyTuple(dst.var, dst_offset, src.var, src_offset);
copyTuple(dst.N, dst_offset, src.N, src_offset);
}
template <typename DstType, typename SrcType>
__inline__ __device__ static void copyWelfordTripletTuple(
DstType& dst, const SrcType& src, nvfuser_index_t src_offset = 0) {
copyWelfordTripletTuple(dst, 0, src, src_offset);
}
template <typename DstType, typename SrcType, typename PredType>
__inline__ __device__ static void copyWelfordTripletTupleIf(
DstType& dst, const SrcType& src, const PredType& pred) {
copyTupleIf(dst.avg, src.avg, pred);
copyTupleIf(dst.var, src.var, pred);
copyTupleIf(dst.N, src.N, pred);
}
}
### |
namespace fused_reduction {
template <
int NumVals, typename DataTypeT, typename IndexTypeT, template <int, typename>
typename MakeTuple>
struct WelfordTripletTuple {
static constexpr int num_vals = NumVals;
using DataType = DataTypeT;
using IndexType = IndexTypeT;
using DataTuple = typename MakeTuple<NumVals, DataType>::type;
using IndexTuple = typename MakeTuple<NumVals, IndexType>::type;
DataTuple avg;
DataTuple var;
IndexTuple N;
WelfordTripletTuple(
const DataTuple& avg, const DataTuple& var, const IndexTuple& N)
: avg(avg), var(var), N(N) {}
};
template <int NumVals, typename DataType, typename IndexType>
using LocalWelfordTripletTuple =
WelfordTripletTuple<NumVals, DataType, IndexType, MakeLocalTuple>;
template <int NumVals, typename DataType, typename IndexType>
using RefWelfordTripletTuple =
WelfordTripletTuple<NumVals, DataType, IndexType, MakeRefTuple>;
template <int NumVals, typename DataType, typename IndexType>
using ConstRefWelfordTripletTuple =
WelfordTripletTuple<NumVals, DataType, IndexType, MakeConstRefTuple>;
template <int NumVals, typename DataTypeT, typename IndexTypeT>
using VolatilePtrWelfordTripletTuple =
WelfordTripletTuple<NumVals, DataTypeT, IndexTypeT, MakeVolatilePtrTuple>;
template <typename WelfordTripletTupleType>
__inline__ __device__ static void operator+=(
WelfordTripletTupleType& triplet, nvfuser_index_t offset) {
triplet.avg += offset;
triplet.var += offset;
triplet.N += offset;
}
template <typename DstType, typename SrcType>
__inline__ __device__ static void copyWelfordTripletTuple(
DstType& dst, nvfuser_index_t dst_offset, const SrcType& src, nvfuser_index_t src_offset = 0) {
copyTuple(dst.avg, dst_offset, src.avg, src_offset);
copyTuple(dst.var, dst_offset, src.var, src_offset);
copyTuple(dst.N, dst_offset, src.N, src_offset);
}
template <typename DstType, typename SrcType>
__inline__ __device__ static void copyWelfordTripletTuple(
DstType& dst, const SrcType& src, nvfuser_index_t src_offset = 0) {
copyWelfordTripletTuple(dst, 0, src, src_offset);
}
template <typename DstType, typename SrcType, typename PredType>
__inline__ __device__ static void copyWelfordTripletTupleIf(
DstType& dst, const SrcType& src, const PredType& pred) {
copyTupleIf(dst.avg, src.avg, pred);
copyTupleIf(dst.var, src.var, pred);
copyTupleIf(dst.N, src.N, pred);
}
}
### |
// !!! This is a file automatically generated by hipify!!!
namespace grid_broadcast {
// Broadcasts per-thread values across threads and blocks.
//
// Function parameters:
// - out: Per-thread output location
// - inp_val: Per-thread input value
// - work_buf: Temporary buffer for communication across threads/blocks
// - sync_flags: A vector of integers for synchronizations
//
// Template parameters:
// - X/Y/Z_BLOCK: When true, broadcasts across thread blocks along the X/Y/Z
// dimensions
// - X/Y/Z_THREAD: When true, broadcasts across threads along the X/Y/Z
// dimensions
template <
bool X_BLOCK,
bool Y_BLOCK,
bool Z_BLOCK,
bool X_THREAD,
bool Y_THREAD,
bool Z_THREAD,
typename T>
__device__ void broadcast(
T& out,
const T& inp_val,
volatile T* work_buf,
Tensor<int64_t, 1> sync_flags,
bool read_write_pred) {
// Number of values broadcasted in the grid dimensions
const auto grid_seg_size =
index_utils::maskedSize<X_BLOCK, Y_BLOCK, Z_BLOCK>(gridDim);
// Index of the broadcast we're performing out of the grid_seg_size
const auto grid_seg_idx =
index_utils::maskedOffset<!X_BLOCK, !Y_BLOCK, !Z_BLOCK>(
blockIdx, gridDim);
  // Number of threads not participating in a broadcast dimension; this is the
  // number of thread entries in the work buffer and therefore the stride
const auto block_stride =
index_utils::maskedSize<!X_THREAD, !Y_THREAD, !Z_THREAD>(blockDim);
// Which broadcast in the block this is to line up the entry with the work
// buffer
const auto thread_offset =
index_utils::maskedOffset<!X_THREAD, !Y_THREAD, !Z_THREAD>(
threadIdx, blockDim);
const bool has_valid_data = (!X_BLOCK || blockIdx.x == gridDim.x - 1) &&
(!Y_BLOCK || blockIdx.y == gridDim.y - 1) &&
(!Z_BLOCK || blockIdx.z == gridDim.z - 1) &&
(!X_THREAD || threadIdx.x == 0) && (!Y_THREAD || threadIdx.y == 0) &&
(!Z_THREAD || threadIdx.z == 0);
if (has_valid_data && read_write_pred) {
work_buf[grid_seg_idx * block_stride + thread_offset] = inp_val;
__threadfence();
}
grid_sync::sync<X_BLOCK, Y_BLOCK, Z_BLOCK, true>(
sync_flags[grid_seg_idx], grid_seg_size);
if (read_write_pred) {
out = work_buf[grid_seg_idx * block_stride + thread_offset];
}
// Make sure everyone has read from the buffer before continuing the kernel
// and potentially overwriting
grid_sync::sync<X_BLOCK, Y_BLOCK, Z_BLOCK, true>(
sync_flags[grid_seg_idx], grid_seg_size);
}
} // namespace grid_broadcast
### |
namespace grid_broadcast {
// Broadcasts per-thread values across threads and blocks.
//
// Function parameters:
// - out: Per-thread output location
// - inp_val: Per-thread input value
// - work_buf: Temporary buffer for communication across threads/blocks
// - sync_flags: A vector of integers for synchronizations
//
// Template parameters:
// - X/Y/Z_BLOCK: When true, broadcasts across thread blocks along the X/Y/Z
// dimensions
// - X/Y/Z_THREAD: When true, broadcasts across threads along the X/Y/Z
// dimensions
template <
bool X_BLOCK,
bool Y_BLOCK,
bool Z_BLOCK,
bool X_THREAD,
bool Y_THREAD,
bool Z_THREAD,
typename T>
__device__ void broadcast(
T& out,
const T& inp_val,
volatile T* work_buf,
Tensor<int64_t, 1> sync_flags,
bool read_write_pred) {
// Number of values broadcasted in the grid dimensions
const auto grid_seg_size =
index_utils::maskedSize<X_BLOCK, Y_BLOCK, Z_BLOCK>(gridDim);
// Index of the broadcast we're performing out of the grid_seg_size
const auto grid_seg_idx =
index_utils::maskedOffset<!X_BLOCK, !Y_BLOCK, !Z_BLOCK>(
blockIdx, gridDim);
  // Number of threads not participating in a broadcast dimension; this is the
  // number of thread entries in the work buffer and therefore the stride
const auto block_stride =
index_utils::maskedSize<!X_THREAD, !Y_THREAD, !Z_THREAD>(blockDim);
// Which broadcast in the block this is to line up the entry with the work
// buffer
const auto thread_offset =
index_utils::maskedOffset<!X_THREAD, !Y_THREAD, !Z_THREAD>(
threadIdx, blockDim);
const bool has_valid_data = (!X_BLOCK || blockIdx.x == gridDim.x - 1) &&
(!Y_BLOCK || blockIdx.y == gridDim.y - 1) &&
(!Z_BLOCK || blockIdx.z == gridDim.z - 1) &&
(!X_THREAD || threadIdx.x == 0) && (!Y_THREAD || threadIdx.y == 0) &&
(!Z_THREAD || threadIdx.z == 0);
if (has_valid_data && read_write_pred) {
work_buf[grid_seg_idx * block_stride + thread_offset] = inp_val;
__threadfence();
}
grid_sync::sync<X_BLOCK, Y_BLOCK, Z_BLOCK, true>(
sync_flags[grid_seg_idx], grid_seg_size);
if (read_write_pred) {
out = work_buf[grid_seg_idx * block_stride + thread_offset];
}
// Make sure everyone has read from the buffer before continuing the kernel
// and potentially overwriting
grid_sync::sync<X_BLOCK, Y_BLOCK, Z_BLOCK, true>(
sync_flags[grid_seg_idx], grid_seg_size);
}
} // namespace grid_broadcast
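// Editor's note: the kernel below is an illustrative sketch, not part of the
// original file. It assumes the Tensor, index_utils, block_sync, and grid_sync
// helpers defined elsewhere in this corpus are in scope; the kernel name and
// buffer sizing comments are hypothetical. It broadcasts across blockIdx.x and
// threadIdx.x only:
__global__ void broadcast_x_example(
    const float* in,
    float* out,
    volatile float* work_buf, // gridDim.y*gridDim.z*blockDim.y*blockDim.z elements
    Tensor<int64_t, 1> sync_flags) { // one flag per grid segment
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  float result = 0.f;
  grid_broadcast::broadcast<true, false, false, true, false, false, float>(
      result, in[tid], work_buf, sync_flags, /*read_write_pred=*/true);
  out[tid] = result;
}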
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char bessel_j0_name[] = "bessel_j0_forward";
void bessel_j0_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_j0_hip", [&]() {
jitted_gpu_kernel<bessel_j0_name, scalar_t, scalar_t, 1>(iterator, bessel_j0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_j0_hip", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return bessel_j0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
} // anonymous namespace
REGISTER_DISPATCH(special_bessel_j0_stub, &bessel_j0_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char bessel_j0_name[] = "bessel_j0_forward";
void bessel_j0_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_j0_cuda", [&]() {
jitted_gpu_kernel<bessel_j0_name, scalar_t, scalar_t, 1>(iterator, bessel_j0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_j0_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return bessel_j0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
} // anonymous namespace
REGISTER_DISPATCH(special_bessel_j0_stub, &bessel_j0_kernel_cuda);
} // namespace at::native
### |
namespace grid_sync {
#define FIRST_UINT64_BIT ((uint64_t)1 << (sizeof(uint64_t) * 8 - 1))
template <typename T>
__device__ T globalAsVolatile(volatile T& global_val) {
return global_val;
}
template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, bool PERSISTENT>
__device__ void sync(
int64_t& semaphore, const uint64_t& segment_size, const bool last_block) {
__threadfence();
block_sync::sync();
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
uint64_t semaphore_increment = 1;
if (last_block) {
semaphore_increment = FIRST_UINT64_BIT - (segment_size - 1);
}
uint64_t oldArrive =
atomicAdd(reinterpret_cast<uint64_t*>(&semaphore), semaphore_increment);
unsigned int ns = 8;
while ((PERSISTENT || last_block) &&
((oldArrive ^ globalAsVolatile(semaphore)) & FIRST_UINT64_BIT) ==
0) {
#if __HIP_ARCH__ >= 700
__nanosleep(ns);
if (ns < 256) {
ns *= 2;
}
#endif
}
}
block_sync::sync();
}
template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, bool PERSISTENT>
__device__ void sync(int64_t& semaphore, const uint64_t& segment_size) {
sync<X_BLOCK, Y_BLOCK, Z_BLOCK, PERSISTENT>(
semaphore, segment_size, index_utils::maskedIsLast<X_BLOCK, Y_BLOCK, Z_BLOCK>(blockIdx, gridDim));
}
template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK>
__device__ void sync(
int64_t& semaphore, const uint64_t& segment_size, const nvfuser_index_t n_entrances) {
__threadfence();
block_sync::sync();
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
bool last_block =
index_utils::maskedIsLast<X_BLOCK, Y_BLOCK, Z_BLOCK>(blockIdx, gridDim);
if (last_block) {
int64_t finished_val =
((int64_t)(
index_utils::maskedSize<X_BLOCK, Y_BLOCK, Z_BLOCK>(gridDim) -
1)) *
((int64_t)n_entrances);
unsigned int ns = 8;
while (globalAsVolatile(semaphore) < finished_val) {
#if __HIP_ARCH__ >= 700
__nanosleep(ns);
if (ns < 256) {
ns *= 2;
}
#endif
}
} else {
auto old = atomicAdd(reinterpret_cast<uint64_t*>(&semaphore), 1);
}
}
block_sync::sync();
}
}
### |
namespace grid_sync {
#define FIRST_UINT64_BIT ((uint64_t)1 << (sizeof(uint64_t) * 8 - 1))
template <typename T>
__device__ T globalAsVolatile(volatile T& global_val) {
return global_val;
}
template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, bool PERSISTENT>
__device__ void sync(
int64_t& semaphore, const uint64_t& segment_size, const bool last_block) {
__threadfence();
block_sync::sync();
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
uint64_t semaphore_increment = 1;
if (last_block) {
semaphore_increment = FIRST_UINT64_BIT - (segment_size - 1);
}
uint64_t oldArrive =
atomicAdd(reinterpret_cast<uint64_t*>(&semaphore), semaphore_increment);
unsigned int ns = 8;
while ((PERSISTENT || last_block) &&
((oldArrive ^ globalAsVolatile(semaphore)) & FIRST_UINT64_BIT) ==
0) {
#if __CUDA_ARCH__ >= 700
__nanosleep(ns);
if (ns < 256) {
ns *= 2;
}
#endif
}
}
block_sync::sync();
}
template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, bool PERSISTENT>
__device__ void sync(int64_t& semaphore, const uint64_t& segment_size) {
sync<X_BLOCK, Y_BLOCK, Z_BLOCK, PERSISTENT>(
semaphore, segment_size, index_utils::maskedIsLast<X_BLOCK, Y_BLOCK, Z_BLOCK>(blockIdx, gridDim));
}
template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK>
__device__ void sync(
int64_t& semaphore, const uint64_t& segment_size, const nvfuser_index_t n_entrances) {
__threadfence();
block_sync::sync();
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
bool last_block =
index_utils::maskedIsLast<X_BLOCK, Y_BLOCK, Z_BLOCK>(blockIdx, gridDim);
if (last_block) {
int64_t finished_val =
((int64_t)(
index_utils::maskedSize<X_BLOCK, Y_BLOCK, Z_BLOCK>(gridDim) -
1)) *
((int64_t)n_entrances);
unsigned int ns = 8;
while (globalAsVolatile(semaphore) < finished_val) {
#if __CUDA_ARCH__ >= 700
__nanosleep(ns);
if (ns < 256) {
ns *= 2;
}
#endif
}
} else {
auto old = atomicAdd(reinterpret_cast<uint64_t*>(&semaphore), 1);
}
}
block_sync::sync();
}
}
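// Editor's note: illustrative sketch, not part of the original file. A
// hypothetical non-persistent sync across every block in the grid, with
// sync_flags[0] zero-initialized before launch (assumes the Tensor and
// index_utils helpers from this corpus are in scope):
__device__ void grid_sync_all_example(Tensor<int64_t, 1> sync_flags) {
  grid_sync::sync<true, true, true, false>(
      sync_flags[0], index_utils::maskedSize<true, true, true>(gridDim));
}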
### |
// !!! This is a file automatically generated by hipify!!!
namespace index_utils {
// Utility functions
// Total size of provided dimension
template <typename _dim3>
__device__ __forceinline__ nvfuser_index_t size(const _dim3& d) {
return (nvfuser_index_t)d.x * (nvfuser_index_t)d.y * (nvfuser_index_t)d.z;
}
// Linearized indexing of idx based on dim, if bool==false that dimension does
// not participate
template <bool X, bool Y, bool Z, typename _dim3, typename _dim3_2>
__device__ nvfuser_index_t maskedOffset(const _dim3& idx, const _dim3_2& dim) {
nvfuser_index_t offset = 0;
if (Z)
offset += idx.z;
if (Y)
offset = offset * dim.y + idx.y;
if (X)
offset = offset * dim.x + idx.x;
return offset;
}
// Linearized indexing of idx based on dim. All dimensions participate.
template <typename _dim3, typename _dim3_2>
__device__ nvfuser_index_t offset(const _dim3& idx, const _dim3_2& dim) {
nvfuser_index_t offset = idx.z;
offset = offset * dim.y + idx.y;
offset = offset * dim.x + idx.x;
return offset;
}
// Masks the provided dim3, those == false get truncated to 1
template <bool X, bool Y, bool Z, typename _dim3>
__device__ dim3 maskedDims(const _dim3& dim) {
return dim3{
X ? (unsigned)dim.x : 1U,
Y ? (unsigned)dim.y : 1U,
Z ? (unsigned)dim.z : 1U};
}
// Provides total size of dim with masking, those dims == false do not
// participate in the size calculation
template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3>
__device__ nvfuser_index_t maskedSize(const _dim3& dim) {
return size(maskedDims<X_BLOCK, Y_BLOCK, Z_BLOCK>(dim));
}
// Checks if provided idx is zero on those dims == true
template <bool X, bool Y, bool Z, typename _dim3>
__device__ bool maskedIsZero(const _dim3& idx) {
bool isZero = true;
if (X)
isZero = isZero && idx.x == 0;
if (Y)
isZero = isZero && idx.y == 0;
if (Z)
isZero = isZero && idx.z == 0;
return isZero;
}
// Checks if provided idx is the last index on those dims == true
template <bool X, bool Y, bool Z, typename _dim3, typename _dim3_2>
__device__ bool maskedIsLast(const _dim3& idx, const _dim3_2& dim) {
bool isZero = true;
if (X)
isZero = isZero && idx.x == dim.x - 1;
if (Y)
isZero = isZero && idx.y == dim.y - 1;
if (Z)
isZero = isZero && idx.z == dim.z - 1;
return isZero;
}
} // namespace index_utils
### |
namespace index_utils {
// Utility functions
// Total size of provided dimension
template <typename _dim3>
__device__ __forceinline__ nvfuser_index_t size(const _dim3& d) {
return (nvfuser_index_t)d.x * (nvfuser_index_t)d.y * (nvfuser_index_t)d.z;
}
// Linearized indexing of idx based on dim, if bool==false that dimension does
// not participate
template <bool X, bool Y, bool Z, typename _dim3, typename _dim3_2>
__device__ nvfuser_index_t maskedOffset(const _dim3& idx, const _dim3_2& dim) {
nvfuser_index_t offset = 0;
if (Z)
offset += idx.z;
if (Y)
offset = offset * dim.y + idx.y;
if (X)
offset = offset * dim.x + idx.x;
return offset;
}
// Linearized indexing of idx based on dim. All dimensions participate.
template <typename _dim3, typename _dim3_2>
__device__ nvfuser_index_t offset(const _dim3& idx, const _dim3_2& dim) {
nvfuser_index_t offset = idx.z;
offset = offset * dim.y + idx.y;
offset = offset * dim.x + idx.x;
return offset;
}
// Masks the provided dim3, those == false get truncated to 1
template <bool X, bool Y, bool Z, typename _dim3>
__device__ dim3 maskedDims(const _dim3& dim) {
return dim3{
X ? (unsigned)dim.x : 1U,
Y ? (unsigned)dim.y : 1U,
Z ? (unsigned)dim.z : 1U};
}
// Provides total size of dim with masking, those dims == false do not
// participate in the size calculation
template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3>
__device__ nvfuser_index_t maskedSize(const _dim3& dim) {
return size(maskedDims<X_BLOCK, Y_BLOCK, Z_BLOCK>(dim));
}
// Checks if provided idx is zero on those dims == true
template <bool X, bool Y, bool Z, typename _dim3>
__device__ bool maskedIsZero(const _dim3& idx) {
bool isZero = true;
if (X)
isZero = isZero && idx.x == 0;
if (Y)
isZero = isZero && idx.y == 0;
if (Z)
isZero = isZero && idx.z == 0;
return isZero;
}
// Checks if provided idx is the last index on those dims == true
template <bool X, bool Y, bool Z, typename _dim3, typename _dim3_2>
__device__ bool maskedIsLast(const _dim3& idx, const _dim3_2& dim) {
bool isZero = true;
if (X)
isZero = isZero && idx.x == dim.x - 1;
if (Y)
isZero = isZero && idx.y == dim.y - 1;
if (Z)
isZero = isZero && idx.z == dim.z - 1;
return isZero;
}
} // namespace index_utils
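// Editor's note: illustrative sketch, not part of the original file. Concrete
// values for the helpers above, with dim = {4, 3, 2} and idx = {1, 2, 1}:
//   offset(idx, dim)                     = ((1 * 3) + 2) * 4 + 1 = 21
//   maskedOffset<true, true, false>(...) = (2 * 4) + 1           = 9  (z ignored)
//   maskedSize<true, false, true>(dim)   = 4 * 1 * 2             = 8
__global__ void index_utils_example() {
  const dim3 dim(4, 3, 2);
  const dim3 idx(1, 2, 1);
  assert(index_utils::offset(idx, dim) == 21);
  assert((index_utils::maskedOffset<true, true, false>(idx, dim)) == 9);
  assert((index_utils::maskedSize<true, false, true>(dim)) == 8);
}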
### |
__device__ unsigned int mulhilo32(
unsigned int a, unsigned int b, unsigned int* result_high) {
*result_high = __umulhi(a, b);
return a * b;
}
__device__ uint4 single_round(uint4 ctr, uint2 key) {
constexpr unsigned long kPhiloxSA = 0xD2511F53;
constexpr unsigned long kPhiloxSB = 0xCD9E8D57;
unsigned int hi0;
unsigned int hi1;
unsigned int lo0 = mulhilo32(kPhiloxSA, ctr.x, &hi0);
unsigned int lo1 = mulhilo32(kPhiloxSB, ctr.z, &hi1);
uint4 ret = {hi1 ^ ctr.y ^ key.x, lo1, hi0 ^ ctr.w ^ key.y, lo0};
return ret;
}
__device__ uint4 philox(
unsigned long long seed, unsigned long long subsequence, unsigned long long offset) {
constexpr unsigned long kPhilox10A = 0x9E3779B9;
constexpr unsigned long kPhilox10B = 0xBB67AE85;
uint2 key = {};
key.x = (unsigned int)seed;
key.y = (unsigned int)(seed >> 32);
uint4 counter = make_uint4(0, 0, 0, 0);
counter.x = (unsigned int)(offset);
counter.y = (unsigned int)(offset >> 32);
counter.z = (unsigned int)(subsequence);
counter.w = (unsigned int)(subsequence >> 32);
uint4 output = {};
uint2 key_ = key;
uint4 counter_ = counter;
for (int i = 0; i < 9; i++) {
counter_ = single_round(counter_, key_);
key_.x += (kPhilox10A);
key_.y += (kPhilox10B);
}
output = single_round(counter_, key_);
return output;
}
__device__ float uniformf(unsigned int x) {
constexpr float kRanInvM32 = 2.3283064e-10f;
float result = x * kRanInvM32;
return result == 1 ? 0.0f : result;
}
__device__ double uniform(unsigned int x, unsigned int y) {
constexpr double kRan2Pow53Inv = 1.1102230246251565e-16;
const unsigned long long z =
(unsigned long long)x ^ ((unsigned long long)y << (53 - 32));
double result = z * kRan2Pow53Inv + (kRan2Pow53Inv / 2.0);
return result == 1 ? 0.0 : result;
}
__device__ double rng_uniform(const uint4& rng_result, int rng_component) {
return uniform(
(&rng_result.x)[rng_component * 2], (&rng_result.x)[rng_component * 2 + 1]);
}
__device__ float rng_uniformf(const uint4& rng_result, int rng_component) {
return uniformf((&rng_result.x)[rng_component]);
}
__device__ double rng_uniform_range(
const uint4& rng_result, int rng_component, double from, double to) {
auto range = to - from;
auto uniform01 = rng_uniform(rng_result, rng_component);
return from + range * uniform01;
}
__device__ float rng_uniform_rangef(
const uint4& rng_result, int rng_component, float from, float to) {
auto range = to - from;
auto uniform01 = rng_uniformf(rng_result, rng_component);
return from + range * uniform01;
}
### |
__device__ unsigned int mulhilo32(
unsigned int a,
unsigned int b,
unsigned int* result_high) {
*result_high = __umulhi(a, b);
return a * b;
}
__device__ uint4 single_round(uint4 ctr, uint2 key) {
constexpr unsigned long kPhiloxSA = 0xD2511F53;
constexpr unsigned long kPhiloxSB = 0xCD9E8D57;
unsigned int hi0;
unsigned int hi1;
unsigned int lo0 = mulhilo32(kPhiloxSA, ctr.x, &hi0);
unsigned int lo1 = mulhilo32(kPhiloxSB, ctr.z, &hi1);
uint4 ret = {hi1 ^ ctr.y ^ key.x, lo1, hi0 ^ ctr.w ^ key.y, lo0};
return ret;
}
__device__ uint4 philox(
unsigned long long seed,
unsigned long long subsequence,
unsigned long long offset) {
constexpr unsigned long kPhilox10A = 0x9E3779B9;
constexpr unsigned long kPhilox10B = 0xBB67AE85;
uint2 key = {};
key.x = (unsigned int)seed;
key.y = (unsigned int)(seed >> 32);
uint4 counter = make_uint4(0, 0, 0, 0);
counter.x = (unsigned int)(offset);
counter.y = (unsigned int)(offset >> 32);
counter.z = (unsigned int)(subsequence);
counter.w = (unsigned int)(subsequence >> 32);
uint4 output = {};
uint2 key_ = key;
uint4 counter_ = counter;
for (int i = 0; i < 9; i++) {
counter_ = single_round(counter_, key_);
key_.x += (kPhilox10A);
key_.y += (kPhilox10B);
}
output = single_round(counter_, key_);
return output;
}
__device__ float uniformf(unsigned int x) {
constexpr float kRanInvM32 = 2.3283064e-10f; // Inverse of 2^32.
float result = x * kRanInvM32;
return result == 1 ? 0.0f : result;
}
__device__ double uniform(unsigned int x, unsigned int y) {
constexpr double kRan2Pow53Inv = 1.1102230246251565e-16;
const unsigned long long z =
(unsigned long long)x ^ ((unsigned long long)y << (53 - 32));
double result = z * kRan2Pow53Inv + (kRan2Pow53Inv / 2.0);
return result == 1 ? 0.0 : result;
}
__device__ double rng_uniform(const uint4& rng_result, int rng_component) {
return uniform(
(&rng_result.x)[rng_component * 2],
(&rng_result.x)[rng_component * 2 + 1]);
}
__device__ float rng_uniformf(const uint4& rng_result, int rng_component) {
return uniformf((&rng_result.x)[rng_component]);
}
__device__ double rng_uniform_range(
const uint4& rng_result,
int rng_component,
double from,
double to) {
auto range = to - from;
auto uniform01 = rng_uniform(rng_result, rng_component);
return from + range * uniform01;
}
__device__ float rng_uniform_rangef(
const uint4& rng_result,
int rng_component,
float from,
float to) {
auto range = to - from;
auto uniform01 = rng_uniformf(rng_result, rng_component);
return from + range * uniform01;
}
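// Editor's note: illustrative sketch, not part of the original file. Each call
// to philox(seed, subsequence, offset) yields four independent 32-bit outputs,
// i.e. four floats via rng_uniformf or two doubles via rng_uniform. A
// hypothetical per-thread usage:
__device__ void philox_example(
    unsigned long long seed,
    unsigned long long offset,
    float* out4) {
  const unsigned long long subsequence =
      blockIdx.x * blockDim.x + threadIdx.x;
  const uint4 r = philox(seed, subsequence, offset);
  for (int c = 0; c < 4; ++c) {
    out4[c] = rng_uniformf(r, c); // uniform in [0, 1)
  }
}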
### |
// !!! This is a file automatically generated by hipify!!!
// Utility macro for this file
#define DEVICE_INLINE __device__ inline
// Utility class for 2D swizzle:
template <typename index_t>
struct IndexGeneric {
const index_t x = 0, y = 0;
DEVICE_INLINE IndexGeneric(index_t x_, index_t y_) : x(x_), y(y_) {}
};
// Default type for integration
using Index2D = IndexGeneric<nvfuser_index_t>;
// Small type for unit computation
using Index2DInt = IndexGeneric<int>;
// ------------------------------------------------------------
// Swizzle Definitions
// for each swizzle name:
// un(Swizzle Name) e.g. unZShape is the inverse of ZShape,
// (unswizzle is needed for inlining and is currently not actively used.)
// ------------------------------------------------------------
// Unit Z swizzle:
// Alternate directions of Y dimension:
// 1 2 3 1 2 3
// 4 5 6 => 6 5 4
// 7 8 9 7 8 9
DEVICE_INLINE Index2D ZShape(Index2D in, Index2D unit_dim) {
return Index2D(in.x, in.x % 2 == 0 ? in.y : (unit_dim.y - in.y - 1));
}
// ZShape is inverse of itself
DEVICE_INLINE Index2D unZShape(Index2D in, Index2D unit_dim) {
return ZShape(in, unit_dim);
}
// Block cyclic Xor swizzle: (bank conflict removal)
// Apply cyclic Xor within blocks:
// Example: cyclic Xor
// 1 2 3 4 1 2 3 4
// 5 6 7 8 6 5 8 7
// 9 10 11 12 => 11 12 9 10
// 13 14 15 16 16 15 14 13
// Note:
DEVICE_INLINE Index2D Xor(Index2D in, Index2DInt unit_dim) {
// Need to validate in swizzle configuration:
// unit_dim.x == unit_dim.y
return Index2D(in.x, (in.y ^ in.x));
}
// Inverse of Xor is itself
DEVICE_INLINE Index2D unXor(Index2D in, Index2DInt unit_dim) {
return Xor(in, unit_dim);
}
// Scatter swizzle:
// Corresponds to the data layout out of ldmatrix intrinsic.
// supported dimensions are : 8x4, 16x4, 32x4
template <int row_size>
DEVICE_INLINE Index2D Scatter(Index2D in) {
static_assert(row_size == 8 || row_size == 16 || row_size == 32);
return Index2D((in.y * row_size + in.x) / 4, in.x % 4);
}
template <int row_size>
DEVICE_INLINE Index2D unScatter(Index2D in) {
static_assert(row_size == 8 || row_size == 16 || row_size == 32);
return Index2D(in.y + (in.x % (row_size / 4)) * 4, in.x / (row_size / 4));
}
#undef DEVICE_INLINE
### |
// Utility macro for this file
#define DEVICE_INLINE __device__ inline
// Utility class for 2D swizzle:
template <typename index_t>
struct IndexGeneric {
const index_t x = 0, y = 0;
DEVICE_INLINE IndexGeneric(index_t x_, index_t y_) : x(x_), y(y_) {}
};
// Default type for integration
using Index2D = IndexGeneric<nvfuser_index_t>;
// Small type for unit computation
using Index2DInt = IndexGeneric<int>;
// ------------------------------------------------------------
// Swizzle Definitions
// for each swizzle name:
// un(Swizzle Name) e.g. unZShape is the inverse of ZShape,
// (unswizzle is needed for inlining and is currently not actively used.)
// ------------------------------------------------------------
// Unit Z swizzle:
// Alternate directions of Y dimension:
// 1 2 3 1 2 3
// 4 5 6 => 6 5 4
// 7 8 9 7 8 9
DEVICE_INLINE Index2D ZShape(Index2D in, Index2D unit_dim) {
return Index2D(in.x, in.x % 2 == 0 ? in.y : (unit_dim.y - in.y - 1));
}
// ZShape is inverse of itself
DEVICE_INLINE Index2D unZShape(Index2D in, Index2D unit_dim) {
return ZShape(in, unit_dim);
}
// Block cyclic Xor swizzle: (bank conflict removal)
// Apply cyclic Xor within blocks:
// Example: cyclic Xor
// 1 2 3 4 1 2 3 4
// 5 6 7 8 6 5 8 7
// 9 10 11 12 => 11 12 9 10
// 13 14 15 16 16 15 14 13
// Note:
DEVICE_INLINE Index2D Xor(Index2D in, Index2DInt unit_dim) {
// Need to validate in swizzle configuration:
// unit_dim.x == unit_dim.y
return Index2D(in.x, (in.y ^ in.x));
}
// Inverse of Xor is itself
DEVICE_INLINE Index2D unXor(Index2D in, Index2DInt unit_dim) {
return Xor(in, unit_dim);
}
// Scatter swizzle:
// Corresponds to the data layout out of ldmatrix intrinsic.
// supported dimensions are : 8x4, 16x4, 32x4
template <int row_size>
DEVICE_INLINE Index2D Scatter(Index2D in) {
static_assert(row_size == 8 || row_size == 16 || row_size == 32);
return Index2D((in.y * row_size + in.x) / 4, in.x % 4);
}
template <int row_size>
DEVICE_INLINE Index2D unScatter(Index2D in) {
static_assert(row_size == 8 || row_size == 16 || row_size == 32);
return Index2D(in.y + (in.x % (row_size / 4)) * 4, in.x / (row_size / 4));
}
#undef DEVICE_INLINE
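// Editor's note: illustrative sketch, not part of the original file. Scatter
// and unScatter are inverses; for row_size = 8 and in = (x=5, y=2):
//   Scatter<8>((5, 2))   = ((2 * 8 + 5) / 4, 5 % 4) = (5, 1)
//   unScatter<8>((5, 1)) = (1 + (5 % 2) * 4, 5 / 2) = (5, 2)
__device__ void swizzle_roundtrip_example() {
  const Index2D in(5, 2);
  const Index2D s = Scatter<8>(in);
  const Index2D back = unScatter<8>(s);
  assert(back.x == in.x && back.y == in.y);
}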
### |
// !!! This is a file automatically generated by hipify!!!
template <typename T, int N>
struct Tensor {
__device__ T& operator[](nvfuser_index_t ind) {
return data[ind];
};
T* data;
nvfuser_index_t size[N];
nvfuser_index_t stride[N];
};
// Specialization for 0-dim case as it does not need size and stride arrays.
// They will be an error as well since zero-length arrays are not allowed.
template <typename T>
struct Tensor<T, 0> {
__device__ T& operator[](nvfuser_index_t) {
return *data;
};
T* data;
};
// Specialization for 0-dim case that's easy to pass in a CPU based tensor.
template <typename T>
struct CpuScalarTensor {
__device__ T& operator[](int) {
return data;
};
T data;
};
### |
template <typename T, int N>
struct Tensor {
__device__ T& operator[](nvfuser_index_t ind) {
return data[ind];
};
T* data;
nvfuser_index_t size[N];
nvfuser_index_t stride[N];
};
// Specialization for 0-dim case as it does not need size and stride arrays.
// They will be an error as well since zero-length arrays are not allowed.
template <typename T>
struct Tensor<T, 0> {
__device__ T& operator[](nvfuser_index_t) {
return *data;
};
T* data;
};
// Specialization for 0-dim case that's easy to pass in a CPU based tensor.
template <typename T>
struct CpuScalarTensor {
__device__ T& operator[](int) {
return data;
};
T data;
};
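// Editor's note: illustrative sketch, not part of the original file.
// Tensor<T, N> only carries a raw pointer plus sizes/strides; linearization is
// left to the caller. A hypothetical strided 2D load:
__device__ float load_2d_example(
    const Tensor<float, 2>& t, nvfuser_index_t i, nvfuser_index_t j) {
  return t.data[i * t.stride[0] + j * t.stride[1]];
}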
### |
// !!! This is a file automatically generated by hipify!!!
// Type trait utils
template <typename Type, bool is_volatile>
struct MaybeVolatile;
template <typename Type>
struct MaybeVolatile<Type, true> {
using type = volatile Type;
};
template <typename Type>
struct MaybeVolatile<Type, false> {
using type = Type;
};
template <typename... Types>
struct TypeList {};
template <int idx, typename T, typename... Types>
struct TypeSelector {
using type = typename TypeSelector<idx - 1, Types...>::type;
};
template <typename T, typename... Types>
struct TypeSelector<0, T, Types...> {
using type = T;
};
template <typename T0, typename T1>
struct IsSameType {
static constexpr bool value = false;
};
template <typename T0>
struct IsSameType<T0, T0> {
static constexpr bool value = true;
};
template <typename T>
struct IsPointerType {
static constexpr bool value = false;
};
template <typename T>
struct IsPointerType<T*> {
static constexpr bool value = true;
};
### |
// Type trait utils
template <typename Type, bool is_volatile>
struct MaybeVolatile;
template <typename Type>
struct MaybeVolatile<Type, true> {
using type = volatile Type;
};
template <typename Type>
struct MaybeVolatile<Type, false> {
using type = Type;
};
template <typename... Types>
struct TypeList {};
template <int idx, typename T, typename... Types>
struct TypeSelector {
using type = typename TypeSelector<idx - 1, Types...>::type;
};
template <typename T, typename... Types>
struct TypeSelector<0, T, Types...> {
using type = T;
};
template <typename T0, typename T1>
struct IsSameType {
static constexpr bool value = false;
};
template <typename T0>
struct IsSameType<T0, T0> {
static constexpr bool value = true;
};
template <typename T>
struct IsPointerType {
static constexpr bool value = false;
};
template <typename T>
struct IsPointerType<T*> {
static constexpr bool value = true;
};
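// Editor's note: illustrative addition, not part of the original file.
// Compile-time spot checks of the trait helpers above:
static_assert(IsSameType<TypeSelector<1, int, float, double>::type, float>::value, "");
static_assert(!IsSameType<int, long>::value, "");
static_assert(IsPointerType<int*>::value && !IsPointerType<int>::value, "");
static_assert(IsSameType<MaybeVolatile<int, true>::type, volatile int>::value, "");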
### |
// !!! This is a file automatically generated by hipify!!!
namespace warp {
template <
bool SINGLE_WARP,
typename T,
typename Func,
typename _dim3ti,
typename _dim3bd>
__device__ void warpReduceTIDX(
T& out,
const T& inp_val,
Func reduction_op,
const _dim3ti& thread_idx,
const _dim3bd& block_dim,
T* shared_mem,
bool read_write_pred,
T init_val) {
constexpr int WARP_SIZE = 32;
// Assume input padded to multiples of a warp
T reduce_val = init_val;
// Do warp reduction
if (read_write_pred) {
reduce_val = inp_val;
}
// Reduce within each warp
for (int i = 16; i >= 1; i /= 2) {
reduction_op(
reduce_val, __shfl_xor_sync(0xffffffff, reduce_val, i, WARP_SIZE));
}
// Reduce across warp if needed
// Load value to shared mem
if (!SINGLE_WARP) {
unsigned int warp_idx = thread_idx.x / WARP_SIZE;
unsigned int lane_idx = thread_idx.x % WARP_SIZE;
unsigned int reduce_group_id = thread_idx.z * block_dim.y + thread_idx.y;
bool is_warp_head = lane_idx == 0;
unsigned int reduction_size = block_dim.x;
unsigned int num_of_warps = reduction_size / WARP_SIZE;
unsigned int smem_offset = reduce_group_id * num_of_warps;
block_sync::sync();
if (is_warp_head) {
shared_mem[smem_offset + warp_idx] = reduce_val;
}
block_sync::sync();
if (warp_idx == 0) {
// This assumes num_of_warps will be < 32, meaning < 1024 threads.
// Should be true for long enough.
assert(num_of_warps <= 32);
reduce_val = lane_idx < num_of_warps ? shared_mem[smem_offset + lane_idx]
: init_val;
// Reduce within warp 0
for (int i = 16; i >= 1; i /= 2) {
reduction_op(
reduce_val, __shfl_xor_sync(0xffffffff, reduce_val, i, 32));
}
}
if (is_warp_head) {
reduction_op(out, reduce_val);
}
} else {
reduction_op(out, reduce_val);
}
}
} // namespace warp
### |
namespace warp {
template <
bool SINGLE_WARP,
typename T,
typename Func,
typename _dim3ti,
typename _dim3bd>
__device__ void warpReduceTIDX(
T& out,
const T& inp_val,
Func reduction_op,
const _dim3ti& thread_idx,
const _dim3bd& block_dim,
T* shared_mem,
bool read_write_pred,
T init_val) {
constexpr int WARP_SIZE = 32;
// Assume input padded to multiples of a warp
T reduce_val = init_val;
// Do warp reduction
if (read_write_pred) {
reduce_val = inp_val;
}
// Reduce within each warp
for (int i = 16; i >= 1; i /= 2) {
reduction_op(
reduce_val, __shfl_xor_sync(0xffffffff, reduce_val, i, WARP_SIZE));
}
// Reduce across warp if needed
// Load value to shared mem
if (!SINGLE_WARP) {
unsigned int warp_idx = thread_idx.x / WARP_SIZE;
unsigned int lane_idx = thread_idx.x % WARP_SIZE;
unsigned int reduce_group_id = thread_idx.z * block_dim.y + thread_idx.y;
bool is_warp_head = lane_idx == 0;
unsigned int reduction_size = block_dim.x;
unsigned int num_of_warps = reduction_size / WARP_SIZE;
unsigned int smem_offset = reduce_group_id * num_of_warps;
block_sync::sync();
if (is_warp_head) {
shared_mem[smem_offset + warp_idx] = reduce_val;
}
block_sync::sync();
if (warp_idx == 0) {
// This assumes num_of_warps will be < 32, meaning < 1024 threads.
// Should be true for long enough.
assert(num_of_warps <= 32);
reduce_val = lane_idx < num_of_warps ? shared_mem[smem_offset + lane_idx]
: init_val;
// Reduce within warp 0
for (int i = 16; i >= 1; i /= 2) {
reduction_op(
reduce_val, __shfl_xor_sync(0xffffffff, reduce_val, i, 32));
}
}
if (is_warp_head) {
reduction_op(out, reduce_val);
}
} else {
reduction_op(out, reduce_val);
}
}
} // namespace warp
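// Editor's note: illustrative sketch, not part of the original file. A
// hypothetical sum reduction over threadIdx.x, assuming the block_sync helper
// from this corpus is in scope, blockDim.x is a multiple of 32, and
// blockDim.y == blockDim.z == 1; the dynamic shared-memory buffer needs
// (blockDim.x / 32) floats.
__global__ void warp_sum_example(const float* in, float* out) {
  extern __shared__ float smem[];
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  float result = 0.f;
  auto add = [](float& a, float b) { a += b; };
  warp::warpReduceTIDX<false>(
      result, in[tid], add, threadIdx, blockDim, smem,
      /*read_write_pred=*/true, /*init_val=*/0.f);
  if (threadIdx.x == 0) {
    out[blockIdx.x] = result; // lane 0 of warp 0 holds the full sum
  }
}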
### |
// !!! This is a file automatically generated by hipify!!!
namespace warp {
template <
bool SINGLE_WARP,
typename T,
typename Func,
typename _dim3ti,
typename _dim3bd>
__device__ void warpReduceTIDX(
T& out,
const T& inp_val,
Func reduction_op,
const _dim3ti& thread_idx,
const _dim3bd& block_dim,
T* shared_mem,
bool read_write_pred,
T init_val) {
constexpr int WARP_SIZE = warpSize;
// Assume input padded to multiples of a warp
T reduce_val = init_val;
// Do warp reduction
if (read_write_pred) {
reduce_val = inp_val;
}
// Reduce within each warp
for (int i = WARP_SIZE/2; i >= 1; i /= 2) {
reduction_op(
reduce_val, __shfl_xor(reduce_val, i, WARP_SIZE));
}
// Reduce across warp if needed
// Load value to shared mem
if (!SINGLE_WARP) {
unsigned int warp_idx = thread_idx.x / WARP_SIZE;
unsigned int lane_idx = thread_idx.x % WARP_SIZE;
unsigned int reduce_group_id = thread_idx.z * block_dim.y + thread_idx.y;
bool is_warp_head = lane_idx == 0;
unsigned int reduction_size = block_dim.x;
unsigned int num_of_warps = reduction_size / WARP_SIZE;
unsigned int smem_offset = reduce_group_id * num_of_warps;
block_sync::sync();
if (read_write_pred && is_warp_head) {
shared_mem[smem_offset + warp_idx] = reduce_val;
}
block_sync::sync();
if (warp_idx == 0) {
// This assumes num_of_warps will be < 32, meaning < 1024 threads.
// Should be true for long enough.
assert(num_of_warps <= 32);
reduce_val = lane_idx < num_of_warps ? shared_mem[smem_offset + lane_idx]
: init_val;
// Reduce within warp 0
for (int i = WARP_SIZE/2; i >= 1; i /= 2) {
reduction_op(
reduce_val, __shfl_xor(reduce_val, i, WARP_SIZE));
}
}
if (is_warp_head) {
reduction_op(out, reduce_val);
}
} else {
reduction_op(out, reduce_val);
}
}
} // namespace warp
### |
namespace warp {
template <
bool SINGLE_WARP,
typename T,
typename Func,
typename _dim3ti,
typename _dim3bd>
__device__ void warpReduceTIDX(
T& out,
const T& inp_val,
Func reduction_op,
const _dim3ti& thread_idx,
const _dim3bd& block_dim,
T* shared_mem,
bool read_write_pred,
T init_val) {
constexpr int WARP_SIZE = warpSize;
// Assume input padded to multiples of a warp
T reduce_val = init_val;
// Do warp reduction
if (read_write_pred) {
reduce_val = inp_val;
}
// Reduce within each warp
for (int i = WARP_SIZE/2; i >= 1; i /= 2) {
reduction_op(
reduce_val, __shfl_xor(reduce_val, i, WARP_SIZE));
}
// Reduce across warp if needed
// Load value to shared mem
if (!SINGLE_WARP) {
unsigned int warp_idx = thread_idx.x / WARP_SIZE;
unsigned int lane_idx = thread_idx.x % WARP_SIZE;
unsigned int reduce_group_id = thread_idx.z * block_dim.y + thread_idx.y;
bool is_warp_head = lane_idx == 0;
unsigned int reduction_size = block_dim.x;
unsigned int num_of_warps = reduction_size / WARP_SIZE;
unsigned int smem_offset = reduce_group_id * num_of_warps;
block_sync::sync();
if (read_write_pred && is_warp_head) {
shared_mem[smem_offset + warp_idx] = reduce_val;
}
block_sync::sync();
if (warp_idx == 0) {
// This assumes num_of_warps will be < 32, meaning < 1024 threads.
// Should be true for long enough.
assert(num_of_warps <= 32);
reduce_val = lane_idx < num_of_warps ? shared_mem[smem_offset + lane_idx]
: init_val;
// Reduce within warp 0
for (int i = WARP_SIZE/2; i >= 1; i /= 2) {
reduction_op(
reduce_val, __shfl_xor(reduce_val, i, WARP_SIZE));
}
}
if (is_warp_head) {
reduction_op(out, reduce_val);
}
} else {
reduction_op(out, reduce_val);
}
}
} // namespace warp
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char bessel_j1_name[] = "bessel_j1_forward";
void bessel_j1_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_j1_hip", [&]() {
jitted_gpu_kernel<bessel_j1_name, scalar_t, scalar_t, 1>(iterator, bessel_j1_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_j1_hip", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return bessel_j1_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
} // anonymous namespace
REGISTER_DISPATCH(special_bessel_j1_stub, &bessel_j1_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char bessel_j1_name[] = "bessel_j1_forward";
void bessel_j1_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_j1_cuda", [&]() {
jitted_gpu_kernel<bessel_j1_name, scalar_t, scalar_t, 1>(iterator, bessel_j1_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_j1_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return bessel_j1_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
} // anonymous namespace
REGISTER_DISPATCH(special_bessel_j1_stub, &bessel_j1_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char bessel_y0_name[] = "bessel_y0_forward";
void bessel_y0_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y0_hip", [&]() {
jitted_gpu_kernel<bessel_y0_name, scalar_t, scalar_t, 1>(iterator, bessel_y0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y0_hip", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return bessel_y0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_bessel_y0_stub, &bessel_y0_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char bessel_y0_name[] = "bessel_y0_forward";
void bessel_y0_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y0_cuda", [&]() {
jitted_gpu_kernel<bessel_y0_name, scalar_t, scalar_t, 1>(iterator, bessel_y0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y0_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return bessel_y0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_bessel_y0_stub, &bessel_y0_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char bessel_y1_name[] = "bessel_y1_forward";
void bessel_y1_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y1_hip", [&]() {
jitted_gpu_kernel<bessel_y1_name, scalar_t, scalar_t, 1>(iterator, bessel_y1_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y1_hip", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return bessel_y1_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_bessel_y1_stub, &bessel_y1_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char bessel_y1_name[] = "bessel_y1_forward";
void bessel_y1_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y1_cuda", [&]() {
jitted_gpu_kernel<bessel_y1_name, scalar_t, scalar_t, 1>(iterator, bessel_y1_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y1_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return bessel_y1_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_bessel_y1_stub, &bessel_y1_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: HIP on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
template<typename scalar_t>
struct BitwiseAndFunctor {
__device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {
return a & b;
}
};
template<>
struct BitwiseAndFunctor<bool> {
__device__ __forceinline__ bool operator()(bool a, bool b) const {
return a && b;
}
};
void bitwise_and_kernel_hip(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), "bitwise_and_hip", [&]() {
BitwiseAndFunctor<scalar_t> f;
opmath_symmetric_gpu_kernel_with_scalars<scalar_t>(iter, f);
});
}
template<typename scalar_t>
struct BitwiseOrFunctor {
__device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {
return a | b;
}
};
template<>
struct BitwiseOrFunctor<bool> {
__device__ __forceinline__ bool operator()(bool a, bool b) const {
return a || b;
}
};
void bitwise_or_kernel_hip(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), "bitwise_or_hip", [&]() {
BitwiseOrFunctor<scalar_t> f;
opmath_symmetric_gpu_kernel_with_scalars<scalar_t>(iter, f);
});
}
template<typename scalar_t>
struct BitwiseXorFunctor {
__device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {
return a ^ b;
}
};
template<>
struct BitwiseXorFunctor<bool> {
__device__ __forceinline__ bool operator()(bool a, bool b) const {
return a != b;
}
};
void bitwise_xor_kernel_hip(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), "bitwise_xor_hip", [&]() {
BitwiseXorFunctor<scalar_t> f;
opmath_symmetric_gpu_kernel_with_scalars<scalar_t>(iter, f);
});
}
REGISTER_DISPATCH(bitwise_and_stub, &bitwise_and_kernel_hip);
REGISTER_DISPATCH(bitwise_or_stub, &bitwise_or_kernel_hip);
REGISTER_DISPATCH(bitwise_xor_stub, &bitwise_xor_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
template<typename scalar_t>
struct BitwiseAndFunctor {
__device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {
return a & b;
}
};
template<>
struct BitwiseAndFunctor<bool> {
__device__ __forceinline__ bool operator()(bool a, bool b) const {
return a && b;
}
};
void bitwise_and_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), "bitwise_and_cuda", [&]() {
BitwiseAndFunctor<scalar_t> f;
opmath_symmetric_gpu_kernel_with_scalars<scalar_t>(iter, f);
});
}
template<typename scalar_t>
struct BitwiseOrFunctor {
__device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {
return a | b;
}
};
template<>
struct BitwiseOrFunctor<bool> {
__device__ __forceinline__ bool operator()(bool a, bool b) const {
return a || b;
}
};
void bitwise_or_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), "bitwise_or_cuda", [&]() {
BitwiseOrFunctor<scalar_t> f;
opmath_symmetric_gpu_kernel_with_scalars<scalar_t>(iter, f);
});
}
template<typename scalar_t>
struct BitwiseXorFunctor {
__device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {
return a ^ b;
}
};
template<>
struct BitwiseXorFunctor<bool> {
__device__ __forceinline__ bool operator()(bool a, bool b) const {
return a != b;
}
};
void bitwise_xor_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), "bitwise_xor_cuda", [&]() {
BitwiseXorFunctor<scalar_t> f;
opmath_symmetric_gpu_kernel_with_scalars<scalar_t>(iter, f);
});
}
REGISTER_DISPATCH(bitwise_and_stub, &bitwise_and_kernel_cuda);
REGISTER_DISPATCH(bitwise_or_stub, &bitwise_or_kernel_cuda);
REGISTER_DISPATCH(bitwise_xor_stub, &bitwise_xor_kernel_cuda);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/BinaryInternal.h>
#include <c10/hip/HIPGuard.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/TypeSafeSignMath.h>
#include <ATen/native/hip/BinaryInternal.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <type_traits>
namespace at::native {
namespace binary_internal {
void div_floor_kernel_hip(TensorIteratorBase& iter) {
const auto dtype = iter.common_dtype();
if (dtype == kByte) {
return div_trunc_kernel_hip(iter);
} else if (isIntegralType(dtype, false)) {
AT_DISPATCH_INTEGRAL_TYPES(dtype, "div_floor_hip", [&]() {
gpu_kernel_with_scalars(
iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return div_floor_integer(a, b);
});
});
} else if (iter.is_cpu_scalar(2)) {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, dtype, "div_floor_hip", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto b = iter.scalar_value<accscalar_t>(2);
if (C10_UNLIKELY(b == 0)) {
return div_true_kernel_hip(iter);
}
auto inv_b = accscalar_t(1.0) / b;
iter.remove_operand(2);
gpu_kernel(iter, [b, inv_b] GPU_LAMBDA(scalar_t a) -> scalar_t {
auto mod = ::fmod(a, b);
auto div = (a - mod) * inv_b;
if ((mod != 0) && (b < 0) != (mod < 0)) {
div -= scalar_t(1);
}
scalar_t floordiv;
if (div != 0) {
floordiv = ::floor(div);
if (div - floordiv > scalar_t(0.5)) {
floordiv += scalar_t(1.0);
}
} else {
floordiv = c10::hip::compat::copysign(scalar_t(0), a * inv_b);
}
return floordiv;
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, dtype, "div_floor_hip", [&]() {
gpu_kernel_with_scalars(
iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return div_floor_floating(a, b);
});
});
}
}
}
REGISTER_DISPATCH(div_floor_stub, &binary_internal::div_floor_kernel_hip);
} ### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/BinaryInternal.h>
#include <c10/cuda/CUDAGuard.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/TypeSafeSignMath.h>
#include <ATen/native/cuda/BinaryInternal.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <type_traits>
namespace at::native {
namespace binary_internal {
void div_floor_kernel_cuda(TensorIteratorBase& iter) {
const auto dtype = iter.common_dtype();
if (dtype == kByte) {
return div_trunc_kernel_cuda(iter);
} else if (isIntegralType(dtype, false)) {
AT_DISPATCH_INTEGRAL_TYPES(dtype, "div_floor_cuda", [&]() {
gpu_kernel_with_scalars(
iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return div_floor_integer(a, b);
});
});
} else if (iter.is_cpu_scalar(2)) {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, dtype, "div_floor_cuda", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto b = iter.scalar_value<accscalar_t>(2);
if (C10_UNLIKELY(b == 0)) {
return div_true_kernel_cuda(iter);
}
auto inv_b = accscalar_t(1.0) / b;
iter.remove_operand(2);
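          // The lambda below rebuilds floor division from truncating fmod:
          // e.g. a = -7, b = 2 gives mod = -1 and (a - mod) * inv_b = -3; the
          // signs of mod and b differ, so the quotient is nudged down to
          // -4 == floor(-3.5).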
gpu_kernel(iter, [b, inv_b] GPU_LAMBDA(scalar_t a) -> scalar_t {
auto mod = std::fmod(a, b);
auto div = (a - mod) * inv_b;
if ((mod != 0) && (b < 0) != (mod < 0)) {
div -= scalar_t(1);
}
scalar_t floordiv;
if (div != 0) {
floordiv = std::floor(div);
if (div - floordiv > scalar_t(0.5)) {
floordiv += scalar_t(1.0);
}
} else {
floordiv = c10::cuda::compat::copysign(scalar_t(0), a * inv_b);
}
return floordiv;
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, dtype, "div_floor_cuda", [&]() {
gpu_kernel_with_scalars(
iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return div_floor_floating(a, b);
});
});
}
}
}
REGISTER_DISPATCH(div_floor_stub, &binary_internal::div_floor_kernel_cuda);
}
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <c10/hip/HIPGuard.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/TypeSafeSignMath.h>
#include <ATen/native/hip/BinaryInternal.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <type_traits>
namespace at::native {
namespace binary_internal {
CONSTEXPR_EXCEPT_WIN_HIP char div_name[] = "div_kernel";
void div_true_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (iter.common_dtype() == kComplexHalf) {
using scalar_t = c10::complex<at::Half>;
#if AT_USE_JITERATOR()
static const auto div_string = jiterator_stringify(
template <typename T> T div_kernel(T a, T b) { return a / b; });
opmath_jitted_gpu_kernel_with_scalars<div_name, scalar_t, scalar_t>(
iter, div_string);
#else
using opmath_t = at::opmath_type<scalar_t>;
opmath_gpu_kernel_with_scalars<scalar_t>(iter, DivFunctor<opmath_t>());
#endif
return;
}
if (iter.is_cpu_scalar(2)) {
// optimization for floating-point types: if the second operand is a CPU
// scalar, compute a * reciprocal(b). Note that this may lose one bit of
// precision compared to computing the division.
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
kHalf, kBFloat16, common_dtype, "div_true_hip", [&]() {
using opmath_t = at::opmath_type<scalar_t>;
auto inv_b = opmath_t(1.0) / iter.scalar_value<opmath_t>(2);
iter.remove_operand(2);
gpu_kernel(
iter,
BUnaryFunctor<scalar_t, scalar_t, scalar_t, MulFunctor<opmath_t>>(
MulFunctor<opmath_t>(), inv_b));
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
kHalf, kBFloat16, common_dtype, "div_true_hip", [&]() {
DivFunctor<scalar_t> f;
gpu_kernel_with_scalars(iter, f);
});
}
}
} // namespace binary_internal
REGISTER_DISPATCH(div_true_stub, &binary_internal::div_true_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <c10/cuda/CUDAGuard.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/TypeSafeSignMath.h>
#include <ATen/native/cuda/BinaryInternal.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <type_traits>
namespace at::native {
namespace binary_internal {
CONSTEXPR_EXCEPT_WIN_CUDA char div_name[] = "div_kernel";
void div_true_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (iter.common_dtype() == kComplexHalf) {
using scalar_t = c10::complex<at::Half>;
#if AT_USE_JITERATOR()
static const auto div_string = jiterator_stringify(
template <typename T> T div_kernel(T a, T b) { return a / b; });
opmath_jitted_gpu_kernel_with_scalars<div_name, scalar_t, scalar_t>(
iter, div_string);
#else
using opmath_t = at::opmath_type<scalar_t>;
opmath_gpu_kernel_with_scalars<scalar_t>(iter, DivFunctor<opmath_t>());
#endif
return;
}
if (iter.is_cpu_scalar(2)) {
// optimization for floating-point types: if the second operand is a CPU
// scalar, compute a * reciprocal(b). Note that this may lose one bit of
// precision compared to computing the division.
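    // Because 1/b is rounded before the multiply, the result can differ from a
    // directly computed a / b by roughly one unit in the last place.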
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
kHalf, kBFloat16, common_dtype, "div_true_cuda", [&]() {
using opmath_t = at::opmath_type<scalar_t>;
auto inv_b = opmath_t(1.0) / iter.scalar_value<opmath_t>(2);
iter.remove_operand(2);
gpu_kernel(
iter,
BUnaryFunctor<scalar_t, scalar_t, scalar_t, MulFunctor<opmath_t>>(
MulFunctor<opmath_t>(), inv_b));
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
kHalf, kBFloat16, common_dtype, "div_true_cuda", [&]() {
DivFunctor<scalar_t> f;
gpu_kernel_with_scalars(iter, f);
});
}
}
} // namespace binary_internal
REGISTER_DISPATCH(div_true_stub, &binary_internal::div_true_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <c10/hip/HIPGuard.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/TypeSafeSignMath.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <type_traits>
namespace at::native {
namespace binary_internal {
void div_trunc_kernel_hip(TensorIteratorBase& iter) {
auto dtype = iter.common_dtype();
if (isIntegralType(dtype, /*includeBool*/ false)) {
AT_DISPATCH_INTEGRAL_TYPES(dtype, "div_trunc_hip", [&]() {
gpu_kernel_with_scalars(
iter,
[] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a / b; });
});
} else if (iter.is_cpu_scalar(2)) {
// optimization for floating-point types: if the second operand is a CPU
// scalar, compute a * reciprocal(b). Note that this may lose one bit of
// precision compared to computing the division.
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, dtype, "div_trunc_hip", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto inv_b = accscalar_t(1.0) / iter.scalar_value<accscalar_t>(2);
iter.remove_operand(2);
gpu_kernel(iter, [inv_b] GPU_LAMBDA(scalar_t a) -> scalar_t {
return std::trunc(a * inv_b);
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, dtype, "div_trunc_hip", [&]() {
gpu_kernel_with_scalars(
iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return std::trunc(a / b);
});
});
}
}
} // namespace binary_internal
REGISTER_DISPATCH(div_trunc_stub, &binary_internal::div_trunc_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <c10/cuda/CUDAGuard.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/TypeSafeSignMath.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <type_traits>
namespace at::native {
namespace binary_internal {
void div_trunc_kernel_cuda(TensorIteratorBase& iter) {
auto dtype = iter.common_dtype();
if (isIntegralType(dtype, /*includeBool*/ false)) {
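    // C++ integer division already truncates toward zero, so plain a / b below
    // implements trunc division for integral types.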
AT_DISPATCH_INTEGRAL_TYPES(dtype, "div_trunc_cuda", [&]() {
gpu_kernel_with_scalars(
iter,
[] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a / b; });
});
} else if (iter.is_cpu_scalar(2)) {
// optimization for floating-point types: if the second operand is a CPU
// scalar, compute a * reciprocal(b). Note that this may lose one bit of
// precision compared to computing the division.
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, dtype, "div_trunc_cuda", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto inv_b = accscalar_t(1.0) / iter.scalar_value<accscalar_t>(2);
iter.remove_operand(2);
gpu_kernel(iter, [inv_b] GPU_LAMBDA(scalar_t a) -> scalar_t {
return std::trunc(a * inv_b);
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, dtype, "div_trunc_cuda", [&]() {
gpu_kernel_with_scalars(
iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return std::trunc(a / b);
});
});
}
}
} // namespace binary_internal
REGISTER_DISPATCH(div_trunc_stub, &binary_internal::div_trunc_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: HIP on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
void atan2_kernel_hip(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
iter.common_dtype(), "atan2_hip",
[&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return ::atan2(a, b);
});
});
}
void hypot_kernel_hip(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
iter.common_dtype(), "hypot_hip",
[&]() {
opmath_symmetric_gpu_kernel_with_scalars<scalar_t>(
iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return ::hypot(a, b);
});
});
}
REGISTER_DISPATCH(atan2_stub, &atan2_kernel_hip);
REGISTER_DISPATCH(hypot_stub, &hypot_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
void atan2_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
iter.common_dtype(), "atan2_cuda",
[&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return ::atan2(a, b);
});
});
}
void hypot_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
iter.common_dtype(), "hypot_cuda",
[&]() {
opmath_symmetric_gpu_kernel_with_scalars<scalar_t>(
iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return ::hypot(a, b);
});
});
}
REGISTER_DISPATCH(atan2_stub, &atan2_kernel_cuda);
REGISTER_DISPATCH(hypot_stub, &hypot_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/BinaryInternal.h>
#include <c10/hip/HIPGuard.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/TypeSafeSignMath.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <type_traits>
// NOTE: HIP on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
CONSTEXPR_EXCEPT_WIN_HIP char mul_name[] = "mul_kernel";
void mul_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (common_dtype == kComplexHalf) {
using scalar_t = c10::complex<at::Half>;
#if AT_USE_JITERATOR()
static const auto mul_string = jiterator_stringify(
template <typename T> T mul_kernel(T a, T b) { return a * b; });
opmath_jitted_gpu_kernel_with_scalars<mul_name, scalar_t, scalar_t>(
iter, mul_string);
#else
using opmath_t = at::opmath_type<scalar_t>;
opmath_symmetric_gpu_kernel_with_scalars<scalar_t>(
iter, binary_internal::MulFunctor<opmath_t>());
#endif
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kHalf, kBFloat16, kBool, iter.common_dtype(), "mul_hip", [&]() {
using opmath_t = at::opmath_type<scalar_t>;
opmath_symmetric_gpu_kernel_with_scalars<scalar_t>(
iter, binary_internal::MulFunctor<opmath_t>());
});
}
}
REGISTER_DISPATCH(mul_stub, &mul_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/BinaryInternal.h>
#include <c10/cuda/CUDAGuard.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/TypeSafeSignMath.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <type_traits>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
CONSTEXPR_EXCEPT_WIN_CUDA char mul_name[] = "mul_kernel";
void mul_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (common_dtype == kComplexHalf) {
using scalar_t = c10::complex<at::Half>;
#if AT_USE_JITERATOR()
static const auto mul_string = jiterator_stringify(
template <typename T> T mul_kernel(T a, T b) { return a * b; });
opmath_jitted_gpu_kernel_with_scalars<mul_name, scalar_t, scalar_t>(
iter, mul_string);
#else
using opmath_t = at::opmath_type<scalar_t>;
opmath_symmetric_gpu_kernel_with_scalars<scalar_t>(
iter, binary_internal::MulFunctor<opmath_t>());
#endif
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kHalf, kBFloat16, kBool, iter.common_dtype(), "mul_cuda", [&]() {
using opmath_t = at::opmath_type<scalar_t>;
opmath_symmetric_gpu_kernel_with_scalars<scalar_t>(
iter, binary_internal::MulFunctor<opmath_t>());
});
}
}
REGISTER_DISPATCH(mul_stub, &mul_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/hip/cub.cuh>
#include <ATen/hip/HIPConfig.h>
namespace at {
namespace hip {
namespace cub {
namespace {
template <typename scalar_t>
struct SumOp {
__device__ scalar_t operator () (scalar_t a, scalar_t b) const {
return a + b;
}
};
}
template <typename input_t, typename output_t>
void inclusive_sum_truncating(const input_t *input, output_t *output, int64_t num_items) {
using NO_ROCM(at_hip_detail)::hipcub::Sum;
inclusive_scan(input, output, Sum{}, num_items);
}
template void inclusive_sum_truncating(const int32_t *input, int32_t *output, int64_t num_items);
template void inclusive_sum_truncating(const int64_t *input, int64_t *output, int64_t num_items);
template void inclusive_sum_truncating(const int32_t *input, int64_t *output, int64_t num_items);
template <typename input_t, typename output_t>
void exclusive_sum_in_common_type(const input_t *input, output_t *output, int64_t num_items) {
using scalar_t = std::common_type_t<input_t, output_t>;
exclusive_scan(input, output, SumOp<scalar_t>{}, scalar_t(0), num_items);
}
template void exclusive_sum_in_common_type(const int32_t *input, int32_t *output, int64_t num_items);
template void exclusive_sum_in_common_type(const int64_t *input, int64_t *output, int64_t num_items);
namespace {
struct CountMaskOp {
__device__ int64_t operator() (const uint8_t &x) const {
return x != 0;
}
};
}
void mask_exclusive_sum(const uint8_t *mask, int64_t *output_idx, int64_t n) {
CountMaskOp op{};
auto iter = NO_ROCM(at_hip_detail)::hipcub::TransformInputIterator<
bool, decltype(op), decltype(mask)>(mask, op);
exclusive_scan(iter, output_idx, SumOp<int64_t>{}, int64_t{0}, n);
}
}}} // namespace at::hip::cub
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/cuda/cub.cuh>
#include <ATen/cuda/CUDAConfig.h>
namespace at {
namespace cuda {
namespace cub {
namespace {
template <typename scalar_t>
struct SumOp {
__device__ scalar_t operator () (scalar_t a, scalar_t b) const {
return a + b;
}
};
}
template <typename input_t, typename output_t>
void inclusive_sum_truncating(const input_t *input, output_t *output, int64_t num_items) {
using NO_ROCM(at_cuda_detail)::cub::Sum;
inclusive_scan(input, output, Sum{}, num_items);
}
template void inclusive_sum_truncating(const int32_t *input, int32_t *output, int64_t num_items);
template void inclusive_sum_truncating(const int64_t *input, int64_t *output, int64_t num_items);
template void inclusive_sum_truncating(const int32_t *input, int64_t *output, int64_t num_items);
template <typename input_t, typename output_t>
void exclusive_sum_in_common_type(const input_t *input, output_t *output, int64_t num_items) {
using scalar_t = std::common_type_t<input_t, output_t>;
exclusive_scan(input, output, SumOp<scalar_t>{}, scalar_t(0), num_items);
}
template void exclusive_sum_in_common_type(const int32_t *input, int32_t *output, int64_t num_items);
template void exclusive_sum_in_common_type(const int64_t *input, int64_t *output, int64_t num_items);
namespace {
struct CountMaskOp {
__device__ int64_t operator() (const uint8_t &x) const {
return x != 0;
}
};
}
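// Computes running output offsets for stream compaction: each nonzero mask
// byte is mapped to 1 and the values are exclusive-summed, so a mask of
// {1, 0, 1, 1} yields output indices {0, 1, 1, 2}.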
void mask_exclusive_sum(const uint8_t *mask, int64_t *output_idx, int64_t n) {
CountMaskOp op{};
auto iter = NO_ROCM(at_cuda_detail)::cub::TransformInputIterator<
bool, decltype(op), decltype(mask)>(mask, op);
exclusive_scan(iter, output_idx, SumOp<int64_t>{}, int64_t{0}, n);
}
}}} // namespace at::cuda::cub
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/TensorIterator.h>
#include <c10/util/TypeSafeSignMath.h>
#include <type_traits>
// NOTE: HIP on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
void remainder_kernel_hip(TensorIteratorBase& iter) {
if (isIntegralType(iter.common_dtype(), /*includeBool*/ false)) {
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "remainder_hip", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
scalar_t r = a % b;
if (r != 0 && c10::signs_differ(r, b)) {
r += b;
}
return r;
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "remainder_hip", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t {
auto mod = ::fmod(a, b);
if (mod != 0 && c10::signs_differ(b, mod)) {
mod += b;
}
return mod;
});
});
}
}
void fmod_kernel_hip(TensorIteratorBase& iter) {
if (isIntegralType(iter.common_dtype(), /*includeBool*/ false)) {
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "fmod_hip", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a % b;
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "fmod_hip", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t {
return ::fmod(a, b);
});
});
}
}
REGISTER_DISPATCH(remainder_stub, &remainder_kernel_hip);
REGISTER_DISPATCH(fmod_stub, &fmod_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/TensorIterator.h>
#include <c10/util/TypeSafeSignMath.h>
#include <type_traits>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
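// remainder follows Python semantics (the result takes the sign of the
// divisor), while fmod keeps C/C++ semantics (the result takes the sign of the
// dividend): e.g. remainder(-7, 3) == 2 but fmod(-7, 3) == -1.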
void remainder_kernel_cuda(TensorIteratorBase& iter) {
if (isIntegralType(iter.common_dtype(), /*includeBool*/ false)) {
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "remainder_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
scalar_t r = a % b;
if (r != 0 && c10::signs_differ(r, b)) {
r += b;
}
return r;
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "remainder_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t {
auto mod = ::fmod(a, b);
if (mod != 0 && c10::signs_differ(b, mod)) {
mod += b;
}
return mod;
});
});
}
}
void fmod_kernel_cuda(TensorIteratorBase& iter) {
if (isIntegralType(iter.common_dtype(), /*includeBool*/ false)) {
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "fmod_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a % b;
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "fmod_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t {
return ::fmod(a, b);
});
});
}
}
REGISTER_DISPATCH(remainder_stub, &remainder_kernel_cuda);
REGISTER_DISPATCH(fmod_stub, &fmod_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: HIP on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
void lshift_kernel_hip(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "lshift_hip", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return static_cast<std::make_unsigned_t<scalar_t>>(a) << b;
});
});
}
void rshift_kernel_hip(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "rshift_hip", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a >> b;
});
});
}
REGISTER_DISPATCH(lshift_stub, &lshift_kernel_hip);
REGISTER_DISPATCH(rshift_stub, &rshift_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
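// The left shift first casts to the unsigned counterpart of scalar_t: shifting
// a negative signed value left was undefined behavior before C++20, while the
// unsigned shift is well defined; the result converts back to scalar_t on return.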
void lshift_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "lshift_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return static_cast<std::make_unsigned_t<scalar_t>>(a) << b;
});
});
}
void rshift_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "rshift_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a >> b;
});
});
}
REGISTER_DISPATCH(lshift_stub, &lshift_kernel_cuda);
REGISTER_DISPATCH(rshift_stub, &rshift_kernel_cuda);
} // namespace at::native
### |
#include "hip/hip_runtime.h"
#pragma once
#include <thrust/tuple.h>
#include <ATen/native/SharedReduceOps.h>
#include <ATen/hip/DeviceUtils.cuh>
namespace at {
namespace native {
namespace hip_utils {
constexpr int kHIPBlockReduceNumThreads = 512;
constexpr int kHIPBlockReduceMaxThreads = C10_WARP_SIZE * C10_WARP_SIZE;
template <typename T>
__inline__ __device__ T WarpReduceSum(T val) {
#pragma unroll
for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) {
val += WARP_SHFL_DOWN(val, offset);
}
return val;
}
struct Block1D {
static __forceinline__ __device__ int Tid() { return threadIdx.x; }
static __forceinline__ __device__ int Warps() {
return blockDim.x / C10_WARP_SIZE;
}
};
struct Block2D {
static __forceinline__ __device__ int Tid() {
return threadIdx.x + threadIdx.y * blockDim.x;
}
static __forceinline__ __device__ int Warps() {
return blockDim.x * blockDim.y / C10_WARP_SIZE;
}
};
template <typename T, typename B = Block1D>
__inline__ __device__ T BlockReduceSum(T val, T* shared) {
const int tid = B::Tid();
const int lid = tid % C10_WARP_SIZE;
const int wid = tid / C10_WARP_SIZE;
val = WarpReduceSum(val);
__syncthreads();
if (lid == 0) {
shared[wid] = val;
}
__syncthreads();
val = (tid < B::Warps()) ? shared[lid] : T(0);
if (wid == 0) {
val = WarpReduceSum(val);
}
return val;
}
template <typename T, class ReduceOp>
__inline__ __device__ T WarpReduce(T val, const ReduceOp& op) {
#pragma unroll
for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) {
val = op.combine(val, op.warp_shfl_down(val, offset));
}
return val;
}
template <typename T, class ReduceOp, typename B = Block1D>
__inline__ __device__ T
BlockReduce(T val, const ReduceOp& op, const T& identity_element, T* shared) {
const int tid = B::Tid();
const int lid = tid % C10_WARP_SIZE;
const int wid = tid / C10_WARP_SIZE;
val = WarpReduce(val, op);
__syncthreads();
if (lid == 0) {
shared[wid] = val;
}
__syncthreads();
val = (tid < B::Warps()) ? shared[lid] : identity_element;
if (wid == 0) {
val = WarpReduce(val, op);
}
return val;
}
}
}
} ### |
#pragma once
#include <thrust/tuple.h>
#include <ATen/native/SharedReduceOps.h>
#include <ATen/cuda/DeviceUtils.cuh>
namespace at {
namespace native {
namespace cuda_utils {
constexpr int kCUDABlockReduceNumThreads = 512;
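// The cap below is C10_WARP_SIZE * C10_WARP_SIZE because the block reductions
// in this header stage one partial per warp in shared memory and let a single
// warp combine them, which bounds the supported block size.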
constexpr int kCUDABlockReduceMaxThreads = C10_WARP_SIZE * C10_WARP_SIZE;
template <typename T>
__inline__ __device__ T WarpReduceSum(T val) {
#pragma unroll
for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) {
val += WARP_SHFL_DOWN(val, offset);
}
return val;
}
struct Block1D {
static __forceinline__ __device__ int Tid() { return threadIdx.x; }
static __forceinline__ __device__ int Warps() {
return blockDim.x / C10_WARP_SIZE;
}
};
struct Block2D {
static __forceinline__ __device__ int Tid() {
return threadIdx.x + threadIdx.y * blockDim.x;
}
static __forceinline__ __device__ int Warps() {
return blockDim.x * blockDim.y / C10_WARP_SIZE;
}
};
template <typename T, typename B = Block1D>
__inline__ __device__ T BlockReduceSum(T val, T* shared) {
const int tid = B::Tid();
const int lid = tid % C10_WARP_SIZE;
const int wid = tid / C10_WARP_SIZE;
val = WarpReduceSum(val);
__syncthreads();
if (lid == 0) {
shared[wid] = val;
}
__syncthreads();
val = (tid < B::Warps()) ? shared[lid] : T(0);
if (wid == 0) {
val = WarpReduceSum(val);
}
return val;
}
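// Illustrative usage sketch (not part of this header): each block reduces its
// row into out[blockIdx.x]; `shared` needs one slot per warp, so C10_WARP_SIZE
// entries is always enough.
//
//   __global__ void row_sum(const float* in, float* out, int n) {
//     __shared__ float shared[C10_WARP_SIZE];
//     float val = 0;
//     for (int i = threadIdx.x; i < n; i += blockDim.x)
//       val += in[blockIdx.x * n + i];
//     val = BlockReduceSum(val, shared);
//     if (threadIdx.x == 0) out[blockIdx.x] = val;
//   }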
template <typename T, class ReduceOp>
__inline__ __device__ T WarpReduce(T val, const ReduceOp& op) {
#pragma unroll
for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) {
val = op.combine(val, op.warp_shfl_down(val, offset));
}
return val;
}
template <typename T, class ReduceOp, typename B = Block1D>
__inline__ __device__ T
BlockReduce(T val, const ReduceOp& op, const T& identity_element, T* shared) {
const int tid = B::Tid();
const int lid = tid % C10_WARP_SIZE;
const int wid = tid / C10_WARP_SIZE;
val = WarpReduce(val, op);
__syncthreads();
if (lid == 0) {
shared[wid] = val;
}
__syncthreads();
val = (tid < B::Warps()) ? shared[lid] : identity_element;
if (wid == 0) {
val = WarpReduce(val, op);
}
return val;
}
}
}
}
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char chebyshev_polynomial_t_name[] = "chebyshev_polynomial_t_forward";
void chebyshev_polynomial_t_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_t_hip", [&]() {
opmath_jitted_gpu_kernel_with_scalars<chebyshev_polynomial_t_name, scalar_t, scalar_t>(iterator, chebyshev_polynomial_t_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_t_hip", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return chebyshev_polynomial_t_forward<scalar_t, true>(x, n);
});
});
#endif
} // chebyshev_polynomial_t_kernel_hip
} // namespace (anonymous)
REGISTER_DISPATCH(chebyshev_polynomial_t_stub, &chebyshev_polynomial_t_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char chebyshev_polynomial_t_name[] = "chebyshev_polynomial_t_forward";
void chebyshev_polynomial_t_kernel_cuda(TensorIteratorBase& iterator) {
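  // With the jiterator available the kernel body is passed as a source string
  // and compiled at runtime (trading first-call latency for smaller binaries);
  // otherwise the precompiled GPU lambda below is used.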
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_t_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<chebyshev_polynomial_t_name, scalar_t, scalar_t>(iterator, chebyshev_polynomial_t_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_t_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return chebyshev_polynomial_t_forward<scalar_t, true>(x, n);
});
});
#endif
} // chebyshev_polynomial_t_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(chebyshev_polynomial_t_stub, &chebyshev_polynomial_t_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char chebyshev_polynomial_u_name[] = "chebyshev_polynomial_u_forward";
void chebyshev_polynomial_u_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_u_hip", [&]() {
opmath_jitted_gpu_kernel_with_scalars<chebyshev_polynomial_u_name, scalar_t, scalar_t>(iterator, chebyshev_polynomial_u_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_u_hip", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return chebyshev_polynomial_u_forward<scalar_t, true>(x, n);
});
});
#endif
} // chebyshev_polynomial_u_kernel_hip
} // namespace (anonymous)
REGISTER_DISPATCH(chebyshev_polynomial_u_stub, &chebyshev_polynomial_u_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char chebyshev_polynomial_u_name[] = "chebyshev_polynomial_u_forward";
void chebyshev_polynomial_u_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_u_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<chebyshev_polynomial_u_name, scalar_t, scalar_t>(iterator, chebyshev_polynomial_u_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_u_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return chebyshev_polynomial_u_forward<scalar_t, true>(x, n);
});
});
#endif
} // chebyshev_polynomial_u_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(chebyshev_polynomial_u_stub, &chebyshev_polynomial_u_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char chebyshev_polynomial_v_name[] = "chebyshev_polynomial_v_forward";
void chebyshev_polynomial_v_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_v_hip", [&]() {
opmath_jitted_gpu_kernel_with_scalars<chebyshev_polynomial_v_name, scalar_t, scalar_t>(iterator, chebyshev_polynomial_v_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_v_hip", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return chebyshev_polynomial_v_forward<scalar_t, true>(x, n);
});
});
#endif
} // chebyshev_polynomial_v_kernel_hip
} // namespace (anonymous)
REGISTER_DISPATCH(chebyshev_polynomial_v_stub, &chebyshev_polynomial_v_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char chebyshev_polynomial_v_name[] = "chebyshev_polynomial_v_forward";
void chebyshev_polynomial_v_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_v_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<chebyshev_polynomial_v_name, scalar_t, scalar_t>(iterator, chebyshev_polynomial_v_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_v_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return chebyshev_polynomial_v_forward<scalar_t, true>(x, n);
});
});
#endif
} // chebyshev_polynomial_v_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(chebyshev_polynomial_v_stub, &chebyshev_polynomial_v_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char chebyshev_polynomial_w_name[] = "chebyshev_polynomial_w_forward";
void chebyshev_polynomial_w_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_w_hip", [&]() {
opmath_jitted_gpu_kernel_with_scalars<chebyshev_polynomial_w_name, scalar_t, scalar_t>(iterator, chebyshev_polynomial_w_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_w_hip", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return chebyshev_polynomial_w_forward<scalar_t, true>(x, n);
});
});
#endif
} // chebyshev_polynomial_w_kernel_hip
} // namespace (anonymous)
REGISTER_DISPATCH(chebyshev_polynomial_w_stub, &chebyshev_polynomial_w_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char chebyshev_polynomial_w_name[] = "chebyshev_polynomial_w_forward";
void chebyshev_polynomial_w_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_w_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<chebyshev_polynomial_w_name, scalar_t, scalar_t>(iterator, chebyshev_polynomial_w_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_w_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return chebyshev_polynomial_w_forward<scalar_t, true>(x, n);
});
});
#endif
} // chebyshev_polynomial_w_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(chebyshev_polynomial_w_stub, &chebyshev_polynomial_w_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
// NOTE: HIP on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native { namespace {
enum class EqOpType {EQ, NE};
template<typename scalar_t>
struct CompareEqFunctor{
CompareEqFunctor(EqOpType op): op_(op) {}
const EqOpType op_;
__device__ __forceinline__ bool operator() (scalar_t a, scalar_t b) const {
if (op_ == EqOpType::EQ) {
return a == b;
} else { //NE
return a != b;
}
}
};
}
C10_NOINLINE void compare_eq_ne_kernel(TensorIteratorBase &iter, EqOpType op) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(kComplexHalf, kHalf, kBFloat16, kBool,
iter.common_dtype(), "compare_eq_ne_hip", [&]() {
opmath_symmetric_gpu_kernel_with_scalars<scalar_t, bool>(
iter, CompareEqFunctor<scalar_t>(op));
});
}
void eq_kernel_hip(TensorIteratorBase& iter) {
compare_eq_ne_kernel(iter, EqOpType::EQ);
}
void ne_kernel_hip(TensorIteratorBase& iter) {
compare_eq_ne_kernel(iter, EqOpType::NE);
}
REGISTER_DISPATCH(eq_stub, &eq_kernel_hip);
REGISTER_DISPATCH(ne_stub, &ne_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native { namespace {
enum class EqOpType {EQ, NE};
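// A single functor parameterized by EqOpType lets eq and ne share one
// instantiation of the heavyweight dispatch in compare_eq_ne_kernel below.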
template<typename scalar_t>
struct CompareEqFunctor{
CompareEqFunctor(EqOpType op): op_(op) {}
const EqOpType op_;
__device__ __forceinline__ bool operator() (scalar_t a, scalar_t b) const {
if (op_ == EqOpType::EQ) {
return a == b;
} else { //NE
return a != b;
}
}
};
}
C10_NOINLINE void compare_eq_ne_kernel(TensorIteratorBase &iter, EqOpType op) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(kComplexHalf, kHalf, kBFloat16, kBool,
iter.common_dtype(), "compare_eq_ne_cuda", [&]() {
opmath_symmetric_gpu_kernel_with_scalars<scalar_t, bool>(
iter, CompareEqFunctor<scalar_t>(op));
});
}
void eq_kernel_cuda(TensorIteratorBase& iter) {
compare_eq_ne_kernel(iter, EqOpType::EQ);
}
void ne_kernel_cuda(TensorIteratorBase& iter) {
compare_eq_ne_kernel(iter, EqOpType::NE);
}
REGISTER_DISPATCH(eq_stub, &eq_kernel_cuda);
REGISTER_DISPATCH(ne_stub, &ne_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
// NOTE: HIP on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
namespace {
void complex_kernel_hip(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND(kHalf, iter.input_dtype(0), "complex_hip", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> c10::complex<scalar_t> {
return c10::complex<scalar_t>(a, b);
});
});
}
void polar_kernel_hip(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES(iter.input_dtype(0), "polar_hip", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> c10::complex<scalar_t> {
return c10::complex<scalar_t>(a * std::cos(b), a * std::sin(b));
});
});
}
} // anonymous namespace
REGISTER_DISPATCH(complex_stub, &complex_kernel_hip);
REGISTER_DISPATCH(polar_stub, &polar_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
namespace {
void complex_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND(kHalf, iter.input_dtype(0), "complex_cuda", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> c10::complex<scalar_t> {
return c10::complex<scalar_t>(a, b);
});
});
}
void polar_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES(iter.input_dtype(0), "polar_cuda", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> c10::complex<scalar_t> {
return c10::complex<scalar_t>(a * std::cos(b), a * std::sin(b));
});
});
}
} // anonymous namespace
REGISTER_DISPATCH(complex_stub, &complex_kernel_cuda);
REGISTER_DISPATCH(polar_stub, &polar_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#if defined(__HIPCC__)
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <c10/hip/HIPMathCompat.h>
#endif
// NOTE: HIP on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
void copysign_kernel_hip(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.common_dtype(), "copysign_hip", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return c10::hip::compat::copysign(a, b);
});
});
}
REGISTER_DISPATCH(copysign_stub, ©sign_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#if defined(__CUDACC__)
#include <cuda.h>
#include <cuda_fp16.h>
#include <c10/cuda/CUDAMathCompat.h>
#elif defined(__HIPCC__)
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <c10/hip/HIPMathCompat.h>
#endif
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
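// copysign(a, b) keeps the magnitude of a and takes the sign of b, including
// signed zeros: e.g. copysign(3.0, -0.0) == -3.0.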
void copysign_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.common_dtype(), "copysign_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return c10::cuda::compat::copysign(a, b);
});
});
}
REGISTER_DISPATCH(copysign_stub, ©sign_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#pragma once
#if !defined(USE_ROCM)
#include <hip/hip_runtime.h> // for TORCH_HIP_VERSION
#endif
#if !defined(USE_ROCM)
#include <cub/version.cuh>
#else
#define CUB_VERSION 0
#endif
// cub sort support for __nv_bfloat16 is added to cub 1.13 in:
// https://github.com/NVIDIA/cub/pull/306
#if CUB_VERSION >= 101300
#define CUB_SUPPORTS_NV_BFLOAT16() true
#else
#define CUB_SUPPORTS_NV_BFLOAT16() false
#endif
// cub support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in:
// https://github.com/NVIDIA/cub/pull/326
// CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake
// starting from CUDA 11.5
#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE)
#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true
#else
#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false
#endif
// cub support for UniqueByKey is added to cub 1.16 in:
// https://github.com/NVIDIA/cub/pull/405
#if CUB_VERSION >= 101600
#define CUB_SUPPORTS_UNIQUE_BY_KEY() true
#else
#define CUB_SUPPORTS_UNIQUE_BY_KEY() false
#endif
// cub support for scan by key is added to cub 1.15
// in https://github.com/NVIDIA/cub/pull/376
#if CUB_VERSION >= 101500
#define CUB_SUPPORTS_SCAN_BY_KEY() 1
#else
#define CUB_SUPPORTS_SCAN_BY_KEY() 0
#endif
// cub support for hipcub::FutureValue is added to cub 1.15 in:
// https://github.com/NVIDIA/cub/pull/305
#if CUB_VERSION >= 101500
#define CUB_SUPPORTS_FUTURE_VALUE() true
#else
#define CUB_SUPPORTS_FUTURE_VALUE() false
#endif
### |
#pragma once
#if !defined(USE_ROCM)
#include <cuda.h> // for CUDA_VERSION
#endif
#if !defined(USE_ROCM)
#include <cub/version.cuh>
#else
#define CUB_VERSION 0
#endif
// cub sort support for __nv_bfloat16 is added to cub 1.13 in:
// https://github.com/NVIDIA/cub/pull/306
#if CUB_VERSION >= 101300
#define CUB_SUPPORTS_NV_BFLOAT16() true
#else
#define CUB_SUPPORTS_NV_BFLOAT16() false
#endif
// cub support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in:
// https://github.com/NVIDIA/cub/pull/326
// CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake
// starting from CUDA 11.5
#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE)
#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true
#else
#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false
#endif
// cub support for UniqueByKey is added to cub 1.16 in:
// https://github.com/NVIDIA/cub/pull/405
#if CUB_VERSION >= 101600
#define CUB_SUPPORTS_UNIQUE_BY_KEY() true
#else
#define CUB_SUPPORTS_UNIQUE_BY_KEY() false
#endif
// cub support for scan by key is added to cub 1.15
// in https://github.com/NVIDIA/cub/pull/376
#if CUB_VERSION >= 101500
#define CUB_SUPPORTS_SCAN_BY_KEY() 1
#else
#define CUB_SUPPORTS_SCAN_BY_KEY() 0
#endif
// cub support for cub::FutureValue is added to cub 1.15 in:
// https://github.com/NVIDIA/cub/pull/305
#if CUB_VERSION >= 101500
#define CUB_SUPPORTS_FUTURE_VALUE() true
#else
#define CUB_SUPPORTS_FUTURE_VALUE() false
#endif
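// Typical guarded use of these macros (illustrative sketch only):
//
//   #if CUB_SUPPORTS_UNIQUE_BY_KEY()
//     // call the cub 1.16+ DeviceSelect::UniqueByKey path
//   #else
//     // fall back to a thrust- or handwritten-kernel implementation
//   #endif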
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_local_scalar_dense_native.h>
#endif
#include <ATen/hip/HIPContext.h>
namespace at::native {
Scalar _local_scalar_dense_hip(const Tensor& self) {
Scalar r;
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
kComplexHalf, kHalf, kBool, kBFloat16, self.scalar_type(), "_local_scalar_dense_hip", [&] {
scalar_t value;
hipStream_t stream = at::hip::getCurrentHIPStream();
at::cuda::memcpy_and_sync(&value, self.const_data_ptr<scalar_t>(), sizeof(scalar_t), hipMemcpyDeviceToHost, stream);
r = Scalar(value);
});
return r;
}
} // at::native
### |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_local_scalar_dense_native.h>
#endif
#include <ATen/cuda/CUDAContext.h>
namespace at::native {
Scalar _local_scalar_dense_cuda(const Tensor& self) {
Scalar r;
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
kComplexHalf, kHalf, kBool, kBFloat16, self.scalar_type(), "_local_scalar_dense_cuda", [&] {
scalar_t value;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
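        // memcpy_and_sync copies the single element device-to-host and then
        // synchronizes the stream, so `value` is safe to read immediately; this
        // is why .item() on a CUDA tensor is a synchronizing call.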
at::cuda::memcpy_and_sync(&value, self.const_data_ptr<scalar_t>(), sizeof(scalar_t), cudaMemcpyDeviceToHost, stream);
r = Scalar(value);
});
return r;
}
} // at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/core/TensorBase.h>
#include <ATen/Dispatch.h>
#include <ATen/native/hip/ScanKernels.h>
#include <ATen/native/hip/ScanUtils.cuh>
#include <limits>
#include <functional>
namespace at::native {
void launch_cummax_hip_kernel(const TensorBase& self, const TensorBase& values, const TensorBase& indices, int64_t dim) {
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16,
self.scalar_type(), "cummax_hip", [&]() {
scalar_t init = self.is_floating_point() ? (-1*std::numeric_limits<scalar_t>::infinity()) : std::numeric_limits<scalar_t>::lowest();
scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::greater_equal<scalar_t>());
});
}
void launch_cummin_hip_kernel(const TensorBase& self, const TensorBase& values, const TensorBase& indices, int64_t dim) {
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16,
self.scalar_type(), "cummin_hip", [&]() {
scalar_t init = self.is_floating_point() ? std::numeric_limits<scalar_t>::infinity() : std::numeric_limits<scalar_t>::max();
scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::less_equal<scalar_t>());
});
}
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/core/TensorBase.h>
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/ScanKernels.h>
#include <ATen/native/cuda/ScanUtils.cuh>
#include <limits>
#include <functional>
namespace at::native {
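// The scan is seeded with the identity of each comparison: -inf (or lowest()
// for integral types) for cummax and +inf (or max()) for cummin, so the first
// element along the scanned dimension always wins the initial comparison.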
void launch_cummax_cuda_kernel(const TensorBase& self, const TensorBase& values, const TensorBase& indices, int64_t dim) {
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16,
self.scalar_type(), "cummax_cuda", [&]() {
scalar_t init = self.is_floating_point() ? (-1*std::numeric_limits<scalar_t>::infinity()) : std::numeric_limits<scalar_t>::lowest();
scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::greater_equal<scalar_t>());
});
}
void launch_cummin_cuda_kernel(const TensorBase& self, const TensorBase& values, const TensorBase& indices, int64_t dim) {
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16,
self.scalar_type(), "cummin_cuda", [&]() {
scalar_t init = self.is_floating_point() ? std::numeric_limits<scalar_t>::infinity() : std::numeric_limits<scalar_t>::max();
scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::less_equal<scalar_t>());
});
}
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/core/TensorBase.h>
#include <ATen/Dispatch.h>
#include <ATen/native/hip/ScanKernels.h>
#include <ATen/native/hip/ScanUtils.cuh>
namespace at::native {
void launch_cumprod_hip_kernel(const TensorBase& result, const TensorBase& self, int64_t dim) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
ScalarType::Half, ScalarType::BFloat16, self.scalar_type(), "cumprod_hip", [&]() {
scalar_t init = 1;
scan_dim<scalar_t>(
self,
result,
dim,
init,
std::multiplies<scalar_t>());
});
}
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/core/TensorBase.h>
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/ScanKernels.h>
#include <ATen/native/cuda/ScanUtils.cuh>
namespace at::native {
void launch_cumprod_cuda_kernel(const TensorBase& result, const TensorBase& self, int64_t dim) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
ScalarType::Half, ScalarType::BFloat16, self.scalar_type(), "cumprod_cuda", [&]() {
scalar_t init = 1;
scan_dim<scalar_t>(
self,
result,
dim,
init,
std::multiplies<scalar_t>());
});
}
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/core/TensorBase.h>
#include <ATen/Dispatch.h>
#include <ATen/native/hip/ScanKernels.h>
#include <ATen/native/hip/ScanUtils.cuh>
namespace at::native {
void launch_cumsum_hip_kernel(const TensorBase& result, const TensorBase& self, int64_t dim) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
ScalarType::Half, ScalarType::BFloat16,
self.scalar_type(), "cumsum_hip",
[&]() {
scalar_t init = 0;
scan_dim<scalar_t>(
self,
result,
dim,
init,
std::plus<scalar_t>());
});
}
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/core/TensorBase.h>
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/ScanKernels.h>
#include <ATen/native/cuda/ScanUtils.cuh>
namespace at::native {
void launch_cumsum_cuda_kernel(const TensorBase& result, const TensorBase& self, int64_t dim) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
ScalarType::Half, ScalarType::BFloat16,
self.scalar_type(), "cumsum_cuda",
[&]() {
scalar_t init = 0;
scan_dim<scalar_t>(
self,
result,
dim,
init,
std::plus<scalar_t>());
});
}
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#pragma once
namespace at { namespace native {
#if defined(USE_ROCM)
// take these out when ROCm implements std:: math functions
#include <math.h>
template <typename scalar_t>
static __forceinline__ __device__ scalar_t device_sqrt(scalar_t val);
template <>
__forceinline__ __device__ float device_sqrt(float val) {
return ::sqrtf(val);
}
template <>
__forceinline__ __device__ double device_sqrt(double val) {
return ::sqrt(val);
}
#else
template<typename scalar_t>
__forceinline__ __device__ double device_sqrt(scalar_t val) {
return std::sqrt(val);
}
#endif
}}
### |
#pragma once
namespace at { namespace native {
#if defined(USE_ROCM)
// take these out when ROCm implements std:: math functions
#include <math.h>
template <typename scalar_t>
static __forceinline__ __device__ scalar_t device_sqrt(scalar_t val);
template <>
__forceinline__ __device__ float device_sqrt(float val) {
return ::sqrtf(val);
}
template <>
__forceinline__ __device__ double device_sqrt(double val) {
return ::sqrt(val);
}
#else
template<typename scalar_t>
__forceinline__ __device__ double device_sqrt(scalar_t val) {
return std::sqrt(val);
}
#endif
}}
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/DistributionTemplates.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_kernel.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
namespace at::native {
void bernoulli_tensor_kernel(const TensorBase &self, const TensorBase &p_, c10::optional<Generator> gen_) {
auto generator = get_generator_or_default<HIPGeneratorImpl>(gen_, cuda::detail::getDefaultHIPGenerator());
at::native::templates::cuda::bernoulli_kernel(self, p_, generator);
}
void bernoulli_scalar_kernel(const TensorBase &self, double p, c10::optional<Generator> gen) {
auto iter = TensorIterator::borrowing_nullary_op(self);
auto generator = get_generator_or_default<HIPGeneratorImpl>(gen, cuda::detail::getDefaultHIPGenerator());
at::native::templates::cuda::bernoulli_kernel(iter, p, generator);
}
REGISTER_DISPATCH(bernoulli_tensor_stub, &bernoulli_tensor_kernel);
REGISTER_DISPATCH(bernoulli_scalar_stub, &bernoulli_scalar_kernel);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/DistributionTemplates.h>
#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
namespace at::native {
void bernoulli_tensor_kernel(const TensorBase &self, const TensorBase &p_, c10::optional<Generator> gen_) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::bernoulli_kernel(self, p_, generator);
}
void bernoulli_scalar_kernel(const TensorBase &self, double p, c10::optional<Generator> gen) {
auto iter = TensorIterator::borrowing_nullary_op(self);
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::bernoulli_kernel(iter, p, generator);
}
REGISTER_DISPATCH(bernoulli_tensor_stub, &bernoulli_tensor_kernel);
REGISTER_DISPATCH(bernoulli_scalar_stub, &bernoulli_scalar_kernel);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/hip\HIPGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip\DistributionTemplates.h>
namespace at::native {
void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<HIPGeneratorImpl>(gen, cuda::detail::getDefaultHIPGenerator());
at::native::templates::cuda::cauchy_kernel(iter, median, sigma, generator);
}
REGISTER_DISPATCH(cauchy_stub, &cauchy_kernel);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/DistributionTemplates.h>
namespace at::native {
void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::cauchy_kernel(iter, median, sigma, generator);
}
REGISTER_DISPATCH(cauchy_stub, &cauchy_kernel);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/hip\HIPGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip\DistributionTemplates.h>
namespace at::native {
void exponential_kernel(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<HIPGeneratorImpl>(gen, cuda::detail::getDefaultHIPGenerator());
at::native::templates::cuda::exponential_kernel(iter, lambda, generator);
}
REGISTER_DISPATCH(exponential_stub, &exponential_kernel);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/DistributionTemplates.h>
namespace at::native {
void exponential_kernel(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::exponential_kernel(iter, lambda, generator);
}
REGISTER_DISPATCH(exponential_stub, &exponential_kernel);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/hip\HIPGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip\DistributionTemplates.h>
namespace at::native {
void geometric_kernel(TensorIteratorBase& iter, double p_, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<HIPGeneratorImpl>(gen, cuda::detail::getDefaultHIPGenerator());
at::native::templates::cuda::geometric_kernel(iter, p_, generator);
}
REGISTER_DISPATCH(geometric_stub, &geometric_kernel);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/DistributionTemplates.h>
namespace at::native {
void geometric_kernel(TensorIteratorBase& iter, double p_, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::geometric_kernel(iter, p_, generator);
}
REGISTER_DISPATCH(geometric_stub, &geometric_kernel);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/hip\HIPGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip\DistributionTemplates.h>
namespace at::native {
void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<HIPGeneratorImpl>(gen, cuda::detail::getDefaultHIPGenerator());
at::native::templates::cuda::log_normal_kernel(iter, mean, std, generator);
}
REGISTER_DISPATCH(log_normal_stub, &log_normal_kernel);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/DistributionTemplates.h>
namespace at::native {
void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::log_normal_kernel(iter, mean, std, generator);
}
REGISTER_DISPATCH(log_normal_stub, &log_normal_kernel);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include <ATen/hip\HIPGeneratorImpl.h>
#include <ATen/hip\HIPEvent.h>
#include <ATen/hip/detail\UnpackRaw.cuh>
#include <ATen/hip/detail\HIPHooks.h>
#include <ATen/detail/HIPHooksInterface.h>
#include <c10/core/StreamGuard.h>
#include <c10/hip/HIPGraphsC10Utils.h>
#include <c10/hip/HIPGuard.h>
// c10/hip/HIPGraphsC10Utils.h has utils used by both c10 and aten.
// This file adds utils used by aten only.
namespace at {
namespace hip {
using CaptureId_t = c10::hip::CaptureId_t;
using CaptureStatus = c10::hip::CaptureStatus;
// Use this version where you don't want to create a HIP context if none exists.
inline CaptureStatus currentStreamCaptureStatus() {
#if !defined(USE_ROCM) || ROCM_VERSION >= 50300
// don't create a context if we don't have to
if (c10::hip::hasPrimaryContext(c10::hip::current_device())) {
return c10::hip::currentStreamCaptureStatusMayInitCtx();
} else {
return CaptureStatus::None;
}
#else
return CaptureStatus::None;
#endif
}
inline void assertNotCapturing(std::string attempt) {
auto status = currentStreamCaptureStatus();
TORCH_CHECK(status == CaptureStatus::None,
attempt,
" during HIP graph capture. If you need this call to be captured, "
"please file an issue. "
"Current hipStreamCaptureStatus: ",
status);
}
inline void errorIfCapturingCudnnBenchmark(std::string version_specific) {
auto status = currentStreamCaptureStatus();
TORCH_CHECK(status == CaptureStatus::None,
"Current hipStreamCaptureStatus: ",
status,
"\nCapturing ",
version_specific,
"is prohibited. Possible causes of this error:\n"
"1. No warmup iterations occurred before capture.\n"
"2. The convolutions you're trying to capture use dynamic shapes, "
"in which case capturing them is generally prohibited.");
}
} // namespace hip
} // namespace at
### |
#pragma once
#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <ATen/cuda/CUDAEvent.h>
#include <ATen/cuda/detail/UnpackRaw.cuh>
#include <ATen/cuda/detail/CUDAHooks.h>
#include <ATen/detail/CUDAHooksInterface.h>
#include <c10/core/StreamGuard.h>
#include <c10/cuda/CUDAGraphsC10Utils.h>
#include <c10/cuda/CUDAGuard.h>
// c10/cuda/CUDAGraphsC10Utils.h has utils used by both c10 and aten.
// This file adds utils used by aten only.
namespace at {
namespace cuda {
using CaptureId_t = c10::cuda::CaptureId_t;
using CaptureStatus = c10::cuda::CaptureStatus;
// Use this version where you don't want to create a CUDA context if none exists.
inline CaptureStatus currentStreamCaptureStatus() {
#if !defined(USE_ROCM) || ROCM_VERSION >= 50300
// don't create a context if we don't have to
if (c10::cuda::hasPrimaryContext(c10::cuda::current_device())) {
return c10::cuda::currentStreamCaptureStatusMayInitCtx();
} else {
return CaptureStatus::None;
}
#else
return CaptureStatus::None;
#endif
}
inline void assertNotCapturing(std::string attempt) {
auto status = currentStreamCaptureStatus();
TORCH_CHECK(status == CaptureStatus::None,
attempt,
" during CUDA graph capture. If you need this call to be captured, "
"please file an issue. "
"Current cudaStreamCaptureStatus: ",
status);
}
inline void errorIfCapturingCudnnBenchmark(std::string version_specific) {
auto status = currentStreamCaptureStatus();
TORCH_CHECK(status == CaptureStatus::None,
"Current cudaStreamCaptureStatus: ",
status,
"\nCapturing ",
version_specific,
"is prohibited. Possible causes of this error:\n"
"1. No warmup iterations occurred before capture.\n"
"2. The convolutions you're trying to capture use dynamic shapes, "
"in which case capturing them is generally prohibited.");
}
} // namespace cuda
} // namespace at
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <ATen/hip\HIPGeneratorImpl.h>
#include <ATen/native/hip\DistributionTemplates.h>
namespace at::native {
void normal_kernel(const TensorBase &self, double mean, double std, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<HIPGeneratorImpl>(gen, cuda::detail::getDefaultHIPGenerator());
at::native::templates::cuda::normal_kernel(self, mean, std, generator);
}
REGISTER_DISPATCH(normal_stub, &normal_kernel);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <ATen/native/cuda/DistributionTemplates.h>
namespace at::native {
void normal_kernel(const TensorBase &self, double mean, double std, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::normal_kernel(self, mean, std, generator);
}
REGISTER_DISPATCH(normal_stub, &normal_kernel);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/hip\HIPGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip\DistributionTemplates.h>
namespace at::native {
void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<HIPGeneratorImpl>(gen_, cuda::detail::getDefaultHIPGenerator());
at::native::templates::cuda::random_from_to_kernel(iter, range, base, gen);
}
void random_full_64_bits_range_kernel(TensorIteratorBase& iter, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<HIPGeneratorImpl>(gen_, cuda::detail::getDefaultHIPGenerator());
at::native::templates::cuda::random_full_64_bits_range_kernel(iter, gen);
}
void random_kernel(TensorIteratorBase& iter, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<HIPGeneratorImpl>(gen_, cuda::detail::getDefaultHIPGenerator());
at::native::templates::cuda::random_kernel(iter, gen);
}
REGISTER_DISPATCH(random_from_to_stub, &random_from_to_kernel);
REGISTER_DISPATCH(random_stub, &random_kernel);
REGISTER_DISPATCH(random_full_64_bits_range_stub, &random_full_64_bits_range_kernel);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/DistributionTemplates.h>
namespace at::native {
void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::random_from_to_kernel(iter, range, base, gen);
}
void random_full_64_bits_range_kernel(TensorIteratorBase& iter, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::random_full_64_bits_range_kernel(iter, gen);
}
void random_kernel(TensorIteratorBase& iter, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::random_kernel(iter, gen);
}
REGISTER_DISPATCH(random_from_to_stub, &random_from_to_kernel);
REGISTER_DISPATCH(random_stub, &random_kernel);
REGISTER_DISPATCH(random_full_64_bits_range_stub, &random_full_64_bits_range_kernel);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/hip\HIPGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip\DistributionTemplates.h>
namespace at::native {
void uniform_kernel(TensorIteratorBase& iter, double from, double to, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<HIPGeneratorImpl>(gen, cuda::detail::getDefaultHIPGenerator());
templates::cuda::uniform_kernel(iter, from, to, generator);
}
REGISTER_DISPATCH(uniform_stub, &uniform_kernel);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/DistributionTemplates.h>
namespace at::native {
void uniform_kernel(TensorIteratorBase& iter, double from, double to, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
templates::cuda::uniform_kernel(iter, from, to, generator);
}
REGISTER_DISPATCH(uniform_stub, &uniform_kernel);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/hip\Atomic.cuh>
#include <ATen/hip\HIPContext.h>
#include <ATen/TensorUtils.h>
namespace at {
namespace native {
Tensor embedding_backward_hip_kernel(
const Tensor &grad,
const Tensor &orig_indices,
const Tensor &sorted_indices,
const Tensor &count,
int64_t num_weights,
int padding_idx = -1,
bool mode_mean = false,
const Tensor &offset2bag = Tensor(),
const Tensor &bag_size = Tensor(),
const Tensor &per_sample_weights = Tensor());
}}
### |
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/cuda/Atomic.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/TensorUtils.h>
namespace at {
namespace native {
Tensor embedding_backward_cuda_kernel(
const Tensor &grad,
const Tensor &orig_indices,
const Tensor &sorted_indices,
const Tensor &count,
int64_t num_weights,
int padding_idx = -1,
bool mode_mean = false,
const Tensor &offset2bag = Tensor(),
const Tensor &bag_size = Tensor(),
const Tensor &per_sample_weights = Tensor());
}}
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/Fill.h>
#include <c10/core/Scalar.h>
namespace at::native {
template<typename scalar_t>
struct FillFunctor {
FillFunctor(scalar_t v): value(v) {}
__device__ __forceinline__ scalar_t operator() () const {
return value;
}
private:
scalar_t value;
};
void fill_kernel_hip(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(kComplexHalf, kBool, kHalf, kBFloat16, iter.dtype(), "fill_hip", [&]() {
gpu_kernel(iter, FillFunctor<scalar_t>(value.to<scalar_t>()));
});
}
REGISTER_DISPATCH(fill_stub, &fill_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/Fill.h>
#include <c10/core/Scalar.h>
namespace at::native {
template<typename scalar_t>
struct FillFunctor {
FillFunctor(scalar_t v): value(v) {}
__device__ __forceinline__ scalar_t operator() () const {
return value;
}
private:
scalar_t value;
};
void fill_kernel_cuda(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(kComplexHalf, kBool, kHalf, kBFloat16, iter.dtype(), "fill_cuda", [&]() {
gpu_kernel(iter, FillFunctor<scalar_t>(value.to<scalar_t>()));
});
}
REGISTER_DISPATCH(fill_stub, &fill_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/sparse/SparseStubs.h>
#include <ATen/native/sparse/FlattenIndicesCommon.h>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/hip\KernelUtils.cuh>
#include <ATen/hip/detail\OffsetCalculator.cuh>
#include <ATen/AccumulateType.h>
namespace at::native {
namespace {
template <typename func_t>
struct HIPKernelLauncher {
static void launch(TensorIteratorBase& iter, const func_t& f) {
gpu_kernel(iter, f);
}
};
Tensor flatten_indices_hip_kernel(const Tensor& indices, IntArrayRef size) {
return _flatten_indices<HIPKernelLauncher>(indices, size);
}
}
REGISTER_HIP_DISPATCH(flatten_indices_stub, &flatten_indices_hip_kernel);
} // namespace at::native
### |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/sparse/SparseStubs.h>
#include <ATen/native/sparse/FlattenIndicesCommon.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/KernelUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/AccumulateType.h>
namespace at::native {
namespace {
template <typename func_t>
struct CUDAKernelLauncher {
static void launch(TensorIteratorBase& iter, const func_t& f) {
gpu_kernel(iter, f);
}
};
Tensor flatten_indices_cuda_kernel(const Tensor& indices, IntArrayRef size) {
return _flatten_indices<CUDAKernelLauncher>(indices, size);
}
}
REGISTER_CUDA_DISPATCH(flatten_indices_stub, &flatten_indices_cuda_kernel);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include <ATen/NumericUtils.h>
namespace at::native {
// std:: does not have clamp functors
template <typename T>
struct minimum {
__device__ T operator()(const T& a, const T& b) const {
return (_isnan(a) || a < b) ? a : b;
}
};
template <typename T>
struct maximum {
__device__ T operator()(const T& a, const T& b) const {
return (_isnan(a) || a > b) ? a : b;
}
};
} // namespace at::native
### |
#pragma once
#include <ATen/NumericUtils.h>
namespace at::native {
// std:: does not have clamp functors
template <typename T>
struct minimum {
__device__ T operator()(const T& a, const T& b) const {
return (_isnan(a) || a < b) ? a : b;
}
};
template <typename T>
struct maximum {
__device__ T operator()(const T& a, const T& b) const {
return (_isnan(a) || a > b) ? a : b;
}
};
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/TypeDefault.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/hip\fused_adam_amsgrad_impl.cuh>
#include <ATen/native/hip\fused_adam_impl.cuh>
#include <c10/util/Exception.h>
namespace at::native {
// note(crcrpar): To observe the CI rules, i.e. 20 minutes per file to compile, defensively split instantiations into _impl files.
// this is only for HIP 11.3 for which it took about 20 minutes and 28 minutes in my workstation and CI, respectively.
// As a data point, it took about 20 seconds for HIP 11.7 installed in my environment.
// See https://github.com/pytorch/pytorch/pull/81705 for details.
void _fused_adam_kernel_hip_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList max_exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool amsgrad,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
) {
if (amsgrad) {
TORCH_CHECK(
at::native::check_fast_path_restrictions({params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs}),
"params, grads, exp_avgs, exp_avg_sqs, and max_exp_avg_sqs must have same dtype, device, and layout");
_fused_adam_amsgrad_hip_impl_(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, maximize, grad_scale, found_inf);
} else {
TORCH_CHECK(
at::native::check_fast_path_restrictions({params, grads, exp_avgs, exp_avg_sqs}),
"params, grads, exp_avgs, and exp_avg_sqs must have same dtype, device, and layout");
_fused_adam_hip_impl_(params, grads, exp_avgs, exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, maximize, grad_scale, found_inf);
}
}
} // namespace at::native
### |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/TypeDefault.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/cuda/fused_adam_amsgrad_impl.cuh>
#include <ATen/native/cuda/fused_adam_impl.cuh>
#include <c10/util/Exception.h>
namespace at::native {
// note(crcrpar): To observe the CI rules, i.e. 20 minutes per file to compile, defensively split instantiations into _impl files.
// this is only for CUDA 11.3 for which it took about 20 minutes and 28 minutes in my workstation and CI, respectively.
// As a data point, it took about 20 seconds for CUDA 11.7 installed in my environment.
// See https://github.com/pytorch/pytorch/pull/81705 for details.
void _fused_adam_kernel_cuda_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList max_exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool amsgrad,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
) {
if (amsgrad) {
TORCH_CHECK(
at::native::check_fast_path_restrictions({params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs}),
"params, grads, exp_avgs, exp_avg_sqs, and max_exp_avg_sqs must have same dtype, device, and layout");
_fused_adam_amsgrad_cuda_impl_(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, maximize, grad_scale, found_inf);
} else {
TORCH_CHECK(
at::native::check_fast_path_restrictions({params, grads, exp_avgs, exp_avg_sqs}),
"params, grads, exp_avgs, and exp_avg_sqs must have same dtype, device, and layout");
_fused_adam_cuda_impl_(params, grads, exp_avgs, exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, maximize, grad_scale, found_inf);
}
}
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/TypeDefault.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/hip\fused_adamw_amsgrad_impl.cuh>
#include <ATen/native/hip\fused_adamw_impl.cuh>
#include <c10/util/Exception.h>
namespace at { namespace native {
// note(crcrpar): To observe the CI rules, i.e. 20 minutes per file to compile, defensively split instantiations into _impl files.
// this is only for HIP 11.3 for which it took about 20 minutes and 28 minutes in my workstation and CI, respectively.
// As a data point, it took about 20 seconds for HIP 11.7 installed in my environment.
// See https://github.com/pytorch/pytorch/pull/81705 for details.
void _fused_adamw_kernel_hip_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList max_exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool amsgrad,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
) {
if (amsgrad) {
TORCH_CHECK(
at::native::check_fast_path_restrictions({params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs}),
"params, grads, exp_avgs, exp_avg_sqs, and max_exp_avg_sqs must have same dtype, device, and layout");
_fused_adamw_amsgrad_hip_impl_(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, maximize, grad_scale, found_inf);
} else {
TORCH_CHECK(
at::native::check_fast_path_restrictions({params, grads, exp_avgs, exp_avg_sqs}),
"params, grads, exp_avgs, and exp_avg_sqs must have same dtype, device, and layout");
_fused_adamw_hip_impl_(params, grads, exp_avgs, exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, maximize, grad_scale, found_inf);
}
}
}} // namespace at::native
### |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/TypeDefault.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/cuda/fused_adamw_amsgrad_impl.cuh>
#include <ATen/native/cuda/fused_adamw_impl.cuh>
#include <c10/util/Exception.h>
namespace at { namespace native {
// note(crcrpar): To observe the CI rules, i.e. 20 minutes per file to compile, defensively split instantiations into _impl files.
// this is only for CUDA 11.3 for which it took about 20 minutes and 28 minutes in my workstation and CI, respectively.
// As a data point, it took about 20 seconds for CUDA 11.7 installed in my environment.
// See https://github.com/pytorch/pytorch/pull/81705 for details.
void _fused_adamw_kernel_cuda_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList max_exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool amsgrad,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
) {
if (amsgrad) {
TORCH_CHECK(
at::native::check_fast_path_restrictions({params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs}),
"params, grads, exp_avgs, exp_avg_sqs, and max_exp_avg_sqs must have same dtype, device, and layout");
_fused_adamw_amsgrad_cuda_impl_(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, maximize, grad_scale, found_inf);
} else {
TORCH_CHECK(
at::native::check_fast_path_restrictions({params, grads, exp_avgs, exp_avg_sqs}),
"params, grads, exp_avgs, and exp_avg_sqs must have same dtype, device, and layout");
_fused_adamw_cuda_impl_(params, grads, exp_avgs, exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, maximize, grad_scale, found_inf);
}
}
}} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#include <ATen/native/hip\fused_adamw_amsgrad_impl.cuh>
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/hip\fused_adam_utils.cuh>
#include <ATen/native/hip\MultiTensorApply.cuh>
#include <vector>
namespace at { namespace native {
void _fused_adamw_amsgrad_hip_impl_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList max_exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
) {
std::vector<std::vector<at::Tensor>> tensor_lists{
params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec(), max_exp_avg_sqs.vec() };
float* grad_scale_ptr = grad_scale.has_value() ? grad_scale->data_ptr<float>() : nullptr;
float* found_inf_ptr = found_inf.has_value() ? found_inf->data_ptr<float>() : nullptr;
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, params[0].scalar_type(),
"fused_adamw_kernel_hip", [&]() {
multi_tensor_apply_for_fused_optimizer<5>(
tensor_lists,
state_steps,
FusedAdamMathFunctor<scalar_t, 5>(),
lr,
beta1,
beta2,
weight_decay,
eps,
maximize,
/* amsgrad */true,
grad_scale_ptr,
found_inf_ptr,
ADAM_MODE::ADAMW);
});
}
} } // namespace at::native
### |
#include <ATen/native/cuda/fused_adamw_amsgrad_impl.cuh>
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/cuda/fused_adam_utils.cuh>
#include <ATen/native/cuda/MultiTensorApply.cuh>
#include <vector>
namespace at { namespace native {
void _fused_adamw_amsgrad_cuda_impl_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList max_exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
) {
std::vector<std::vector<at::Tensor>> tensor_lists{
params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec(), max_exp_avg_sqs.vec() };
float* grad_scale_ptr = grad_scale.has_value() ? grad_scale->data_ptr<float>() : nullptr;
float* found_inf_ptr = found_inf.has_value() ? found_inf->data_ptr<float>() : nullptr;
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, params[0].scalar_type(),
"fused_adamw_kernel_cuda", [&]() {
multi_tensor_apply_for_fused_optimizer<5>(
tensor_lists,
state_steps,
FusedAdamMathFunctor<scalar_t, 5>(),
lr,
beta1,
beta2,
weight_decay,
eps,
maximize,
/* amsgrad */true,
grad_scale_ptr,
found_inf_ptr,
ADAM_MODE::ADAMW);
});
}
} } // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include <ATen/Tensor.h>
#include <c10/util/Half.h>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
namespace at {
template <>
inline __half* Tensor::data() const {
return reinterpret_cast<__half*>(data<Half>());
}
} // namespace at
### |
#pragma once
#include <ATen/Tensor.h>
#include <c10/util/Half.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_fp16.h>
namespace at {
template <>
inline __half* Tensor::data() const {
return reinterpret_cast<__half*>(data<Half>());
}
} // namespace at
### |
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include <ATen/core/Tensor.h>
namespace at { namespace native {
void _fused_adamw_amsgrad_hip_impl_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList max_exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
);
} } // namespace at::native
### |
#pragma once
#include <ATen/core/Tensor.h>
namespace at { namespace native {
void _fused_adamw_amsgrad_cuda_impl_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList max_exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
);
} } // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#include <ATen/native/hip\fused_adamw_impl.cuh>
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/hip\fused_adam_utils.cuh>
#include <ATen/native/hip\MultiTensorApply.cuh>
#include <vector>
namespace at { namespace native {
void _fused_adamw_hip_impl_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
) {
std::vector<std::vector<at::Tensor>> tensor_lists{
params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec() };
float* grad_scale_ptr = grad_scale.has_value() ? grad_scale->data_ptr<float>() : nullptr;
float* found_inf_ptr = found_inf.has_value() ? found_inf->data_ptr<float>() : nullptr;
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, params[0].scalar_type(),
"fused_adamw_kernel_hip", [&]() {
multi_tensor_apply_for_fused_optimizer<4>(
tensor_lists,
state_steps,
FusedAdamMathFunctor<scalar_t, 4>(),
lr,
beta1,
beta2,
weight_decay,
eps,
maximize,
/* amsgrad */false,
grad_scale_ptr,
found_inf_ptr,
ADAM_MODE::ADAMW);
});
}
} } // namespace at::native
### |
#include <ATen/native/cuda/fused_adamw_impl.cuh>
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/cuda/fused_adam_utils.cuh>
#include <ATen/native/cuda/MultiTensorApply.cuh>
#include <vector>
namespace at { namespace native {
void _fused_adamw_cuda_impl_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
) {
std::vector<std::vector<at::Tensor>> tensor_lists{
params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec() };
float* grad_scale_ptr = grad_scale.has_value() ? grad_scale->data_ptr<float>() : nullptr;
float* found_inf_ptr = found_inf.has_value() ? found_inf->data_ptr<float>() : nullptr;
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, params[0].scalar_type(),
"fused_adamw_kernel_cuda", [&]() {
multi_tensor_apply_for_fused_optimizer<4>(
tensor_lists,
state_steps,
FusedAdamMathFunctor<scalar_t, 4>(),
lr,
beta1,
beta2,
weight_decay,
eps,
maximize,
/* amsgrad */false,
grad_scale_ptr,
found_inf_ptr,
ADAM_MODE::ADAMW);
});
}
} } // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include <ATen/core/Tensor.h>
namespace at { namespace native {
void _fused_adamw_hip_impl_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
);
} } // namespace at::native
### |
#pragma once
#include <ATen/core/Tensor.h>
namespace at { namespace native {
void _fused_adamw_cuda_impl_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
);
} } // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#include <ATen/native/hip\fused_adam_amsgrad_impl.cuh>
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/hip\fused_adam_utils.cuh>
#include <ATen/native/hip\MultiTensorApply.cuh>
#include <vector>
namespace at::native {
void _fused_adam_amsgrad_hip_impl_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList max_exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
) {
std::vector<std::vector<at::Tensor>> tensor_lists{
params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec(), max_exp_avg_sqs.vec() };
float* grad_scale_ptr = grad_scale.has_value() ? grad_scale->data_ptr<float>() : nullptr;
float* found_inf_ptr = found_inf.has_value() ? found_inf->data_ptr<float>() : nullptr;
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, params[0].scalar_type(),
"fused_adam_kernel_hip", [&]() {
multi_tensor_apply_for_fused_optimizer<5>(
tensor_lists,
state_steps,
FusedAdamMathFunctor<scalar_t, 5>(),
lr,
beta1,
beta2,
weight_decay,
eps,
maximize,
/* amsgrad */true,
grad_scale_ptr,
found_inf_ptr,
ADAM_MODE::ORIGINAL);
});
}
} // namespace at::native
### |
#include <ATen/native/cuda/fused_adam_amsgrad_impl.cuh>
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/cuda/fused_adam_utils.cuh>
#include <ATen/native/cuda/MultiTensorApply.cuh>
#include <vector>
namespace at::native {
void _fused_adam_amsgrad_cuda_impl_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList max_exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
) {
std::vector<std::vector<at::Tensor>> tensor_lists{
params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec(), max_exp_avg_sqs.vec() };
float* grad_scale_ptr = grad_scale.has_value() ? grad_scale->data_ptr<float>() : nullptr;
float* found_inf_ptr = found_inf.has_value() ? found_inf->data_ptr<float>() : nullptr;
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, params[0].scalar_type(),
"fused_adam_kernel_cuda", [&]() {
multi_tensor_apply_for_fused_optimizer<5>(
tensor_lists,
state_steps,
FusedAdamMathFunctor<scalar_t, 5>(),
lr,
beta1,
beta2,
weight_decay,
eps,
maximize,
/* amsgrad */true,
grad_scale_ptr,
found_inf_ptr,
ADAM_MODE::ORIGINAL);
});
}
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include <ATen/core/Tensor.h>
namespace at { namespace native {
void _fused_adam_amsgrad_hip_impl_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList max_exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
);
} } // namespace at::native
### |
#pragma once
#include <ATen/core/Tensor.h>
namespace at { namespace native {
void _fused_adam_amsgrad_cuda_impl_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList max_exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
);
} } // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#include <ATen/native/hip\fused_adam_impl.cuh>
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/hip\fused_adam_utils.cuh>
#include <ATen/native/hip\MultiTensorApply.cuh>
#include <vector>
namespace at::native {
void _fused_adam_hip_impl_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
) {
std::vector<std::vector<at::Tensor>> tensor_lists{
params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec() };
float* grad_scale_ptr = grad_scale.has_value() ? grad_scale->data_ptr<float>() : nullptr;
float* found_inf_ptr = found_inf.has_value() ? found_inf->data_ptr<float>() : nullptr;
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, params[0].scalar_type(),
"fused_adam_kernel_hip", [&]() {
multi_tensor_apply_for_fused_optimizer<4>(
tensor_lists,
state_steps,
FusedAdamMathFunctor<scalar_t, 4>(),
lr,
beta1,
beta2,
weight_decay,
eps,
maximize,
/* amsgrad */false,
grad_scale_ptr,
found_inf_ptr,
ADAM_MODE::ORIGINAL);
});
}
} // namespace at::native
### |
#include <ATen/native/cuda/fused_adam_impl.cuh>
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/cuda/fused_adam_utils.cuh>
#include <ATen/native/cuda/MultiTensorApply.cuh>
#include <vector>
namespace at::native {
void _fused_adam_cuda_impl_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
) {
std::vector<std::vector<at::Tensor>> tensor_lists{
params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec() };
float* grad_scale_ptr = grad_scale.has_value() ? grad_scale->data_ptr<float>() : nullptr;
float* found_inf_ptr = found_inf.has_value() ? found_inf->data_ptr<float>() : nullptr;
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, params[0].scalar_type(),
"fused_adam_kernel_cuda", [&]() {
multi_tensor_apply_for_fused_optimizer<4>(
tensor_lists,
state_steps,
FusedAdamMathFunctor<scalar_t, 4>(),
lr,
beta1,
beta2,
weight_decay,
eps,
maximize,
/* amsgrad */false,
grad_scale_ptr,
found_inf_ptr,
ADAM_MODE::ORIGINAL);
});
}
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include <ATen/core/Tensor.h>
namespace at { namespace native {
void _fused_adam_hip_impl_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
);
} } // namespace at::native
### |
#pragma once
#include <ATen/core/Tensor.h>
namespace at { namespace native {
void _fused_adam_cuda_impl_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
);
} } // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip\JitLoops.cuh>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/hip\Math.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/hip\jit_utils.h>
// NOTE: HIP on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
// See note [Jiterator]
CONSTEXPR_EXCEPT_WIN_HIP char gcd_name[] = "gcd";
void gcd_kernel_hip(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "gcd_hip", [&]() {
jitted_gpu_kernel</*name=*/gcd_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 2>(iter, gcd_string);
});
#else
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "gcd_hip", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {
return calc_gcd(a, b);
});
});
#endif // AT_USE_JITERATOR()
}
// See note [Jiterator]
CONSTEXPR_EXCEPT_WIN_HIP char lcm_name[] = "lcm";
void lcm_kernel_hip(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "lcm_hip", [&]() {
jitted_gpu_kernel</*name=*/lcm_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 2>(iter, lcm_string);
});
#else
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "lcm_hip", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {
scalar_t g = calc_gcd(a, b);
return (g == 0) ? 0 : ::abs(a / g * b);
});
});
#endif // AT_USE_JITERATOR()
}
REGISTER_DISPATCH(gcd_stub, &gcd_kernel_hip);
REGISTER_DISPATCH(lcm_stub, &lcm_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/cuda/jit_utils.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
// See note [Jiterator]
CONSTEXPR_EXCEPT_WIN_CUDA char gcd_name[] = "gcd";
void gcd_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "gcd_cuda", [&]() {
jitted_gpu_kernel</*name=*/gcd_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 2>(iter, gcd_string);
});
#else
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "gcd_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {
return calc_gcd(a, b);
});
});
#endif // AT_USE_JITERATOR()
}
// See note [Jiterator]
CONSTEXPR_EXCEPT_WIN_CUDA char lcm_name[] = "lcm";
void lcm_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "lcm_cuda", [&]() {
jitted_gpu_kernel</*name=*/lcm_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 2>(iter, lcm_string);
});
#else
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "lcm_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {
scalar_t g = calc_gcd(a, b);
return (g == 0) ? 0 : ::abs(a / g * b);
});
});
#endif // AT_USE_JITERATOR()
}
REGISTER_DISPATCH(gcd_stub, &gcd_kernel_cuda);
REGISTER_DISPATCH(lcm_stub, &lcm_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip\JitLoops.cuh>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip\Math.cuh>
#include <ATen/native/hip\jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char hermite_polynomial_h_name[] = "hermite_polynomial_h_forward";
void hermite_polynomial_h_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "hermite_polynomial_h_hip", [&]() {
opmath_jitted_gpu_kernel_with_scalars<hermite_polynomial_h_name, scalar_t, scalar_t>(iterator, hermite_polynomial_h_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "hermite_polynomial_h_hip", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return hermite_polynomial_h_forward<scalar_t, true>(x, n);
});
});
#endif
} // hermite_polynomial_h_kernel_hip
} // namespace (anonymous)
REGISTER_DISPATCH(hermite_polynomial_h_stub, &hermite_polynomial_h_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char hermite_polynomial_h_name[] = "hermite_polynomial_h_forward";
void hermite_polynomial_h_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "hermite_polynomial_h_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<hermite_polynomial_h_name, scalar_t, scalar_t>(iterator, hermite_polynomial_h_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "hermite_polynomial_h_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return hermite_polynomial_h_forward<scalar_t, true>(x, n);
});
});
#endif
} // hermite_polynomial_h_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(hermite_polynomial_h_stub, &hermite_polynomial_h_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip\JitLoops.cuh>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip\Math.cuh>
#include <ATen/native/hip\jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char hermite_polynomial_he_name[] = "hermite_polynomial_he_forward";
void hermite_polynomial_he_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "hermite_polynomial_he_hip", [&]() {
opmath_jitted_gpu_kernel_with_scalars<hermite_polynomial_he_name, scalar_t, scalar_t>(iterator, hermite_polynomial_he_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "hermite_polynomial_he_hip", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return hermite_polynomial_he_forward<scalar_t, true>(x, n);
});
});
#endif
} // hermite_polynomial_he_kernel_hip
} // namespace (anonymous)
REGISTER_DISPATCH(hermite_polynomial_he_stub, &hermite_polynomial_he_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char hermite_polynomial_he_name[] = "hermite_polynomial_he_forward";
void hermite_polynomial_he_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "hermite_polynomial_he_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<hermite_polynomial_he_name, scalar_t, scalar_t>(iterator, hermite_polynomial_he_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "hermite_polynomial_he_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return hermite_polynomial_he_forward<scalar_t, true>(x, n);
});
});
#endif
} // hermite_polynomial_he_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(hermite_polynomial_he_stub, &hermite_polynomial_he_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
#include <ATen/ceil_div.h>
#include <ATen/hip\DeviceUtils.cuh>
#include <ATen/hip\AsmUtils.cuh>
#include <c10/macros/Macros.h>
// Collection of in-kernel scan / prefix sum utilities
namespace at {
namespace hip {
// Inclusive prefix sum for binary vars using intra-warp voting +
// shared memory
template <typename T, bool KillWARDependency, class BinaryFunction>
__device__ void inclusiveBinaryPrefixScan(T* smem, bool in, T* out, BinaryFunction binop) {
// Within-warp, we use warp voting.
#if defined (USE_ROCM)
unsigned long long int vote = WARP_BALLOT(in);
T index = __popcll(getLaneMaskLe() & vote);
T carry = __popcll(vote);
#else
T vote = WARP_BALLOT(in);
T index = __popc(getLaneMaskLe() & vote);
T carry = __popc(vote);
#endif
int warp = threadIdx.x / C10_WARP_SIZE;
// Per each warp, write out a value
if (getLaneId() == 0) {
smem[warp] = carry;
}
__syncthreads();
// Sum across warps in one thread. This appears to be faster than a
// warp shuffle scan for CC 3.0+
if (threadIdx.x == 0) {
int current = 0;
for (int i = 0; i < blockDim.x / C10_WARP_SIZE; ++i) {
T v = smem[i];
smem[i] = binop(smem[i], current);
current = binop(current, v);
}
}
__syncthreads();
// load the carry from the preceding warp
if (warp >= 1) {
index = binop(index, smem[warp - 1]);
}
*out = index;
if (KillWARDependency) {
__syncthreads();
}
}
// Exclusive prefix sum for binary vars using intra-warp voting +
// shared memory
template <typename T, bool KillWARDependency, class BinaryFunction>
__device__ void exclusiveBinaryPrefixScan(T* smem, bool in, T* out, T* carry, BinaryFunction binop) {
inclusiveBinaryPrefixScan<T, false, BinaryFunction>(smem, in, out, binop);
// Inclusive to exclusive
*out -= (T) in;
// The outgoing carry for all threads is the last warp's sum
*carry = smem[at::ceil_div<int>(blockDim.x, C10_WARP_SIZE) - 1];
if (KillWARDependency) {
__syncthreads();
}
}
}} // namespace at::hip
### |
#pragma once
#include <ATen/ceil_div.h>
#include <ATen/cuda/DeviceUtils.cuh>
#include <ATen/cuda/AsmUtils.cuh>
#include <c10/macros/Macros.h>
// Collection of in-kernel scan / prefix sum utilities
namespace at {
namespace cuda {
// Inclusive prefix sum for binary vars using intra-warp voting +
// shared memory
template <typename T, bool KillWARDependency, class BinaryFunction>
__device__ void inclusiveBinaryPrefixScan(T* smem, bool in, T* out, BinaryFunction binop) {
// Within-warp, we use warp voting.
#if defined (USE_ROCM)
unsigned long long int vote = WARP_BALLOT(in);
T index = __popcll(getLaneMaskLe() & vote);
T carry = __popcll(vote);
#else
T vote = WARP_BALLOT(in);
T index = __popc(getLaneMaskLe() & vote);
T carry = __popc(vote);
#endif
int warp = threadIdx.x / C10_WARP_SIZE;
// Per each warp, write out a value
if (getLaneId() == 0) {
smem[warp] = carry;
}
__syncthreads();
// Sum across warps in one thread. This appears to be faster than a
// warp shuffle scan for CC 3.0+
if (threadIdx.x == 0) {
int current = 0;
for (int i = 0; i < blockDim.x / C10_WARP_SIZE; ++i) {
T v = smem[i];
smem[i] = binop(smem[i], current);
current = binop(current, v);
}
}
__syncthreads();
// load the carry from the preceding warp
if (warp >= 1) {
index = binop(index, smem[warp - 1]);
}
*out = index;
if (KillWARDependency) {
__syncthreads();
}
}
// Exclusive prefix sum for binary vars using intra-warp voting +
// shared memory
template <typename T, bool KillWARDependency, class BinaryFunction>
__device__ void exclusiveBinaryPrefixScan(T* smem, bool in, T* out, T* carry, BinaryFunction binop) {
inclusiveBinaryPrefixScan<T, false, BinaryFunction>(smem, in, out, binop);
// Inclusive to exclusive
*out -= (T) in;
// The outgoing carry for all threads is the last warp's sum
*carry = smem[at::ceil_div<int>(blockDim.x, C10_WARP_SIZE) - 1];
if (KillWARDependency) {
__syncthreads();
}
}
}} // namespace at::cuda
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip\JitLoops.cuh>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip\Math.cuh>
#include <ATen/native/hip\jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char laguerre_polynomial_l_name[] = "laguerre_polynomial_l_forward";
void laguerre_polynomial_l_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "laguerre_polynomial_l_hip", [&]() {
opmath_jitted_gpu_kernel_with_scalars<laguerre_polynomial_l_name, scalar_t, scalar_t>(iterator, laguerre_polynomial_l_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "laguerre_polynomial_l_hip", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return laguerre_polynomial_l_forward<scalar_t, true>(x, n);
});
});
#endif
} // laguerre_polynomial_l_kernel_hip
} // namespace (anonymous)
REGISTER_DISPATCH(laguerre_polynomial_l_stub, &laguerre_polynomial_l_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char laguerre_polynomial_l_name[] = "laguerre_polynomial_l_forward";
void laguerre_polynomial_l_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "laguerre_polynomial_l_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<laguerre_polynomial_l_name, scalar_t, scalar_t>(iterator, laguerre_polynomial_l_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "laguerre_polynomial_l_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return laguerre_polynomial_l_forward<scalar_t, true>(x, n);
});
});
#endif
} // laguerre_polynomial_l_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(laguerre_polynomial_l_stub, &laguerre_polynomial_l_kernel_cuda);
} // namespace at::native
### |
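For context on the scalar path: laguerre_polynomial_l_forward(x, n) evaluates the Laguerre polynomial L_n(x). The standard way to build it up is the three-term recurrence below; whether ATen's forward function uses exactly this form is not shown in this file, but the recurrence itself is the textbook definition.

\[
L_0(x) = 1, \qquad L_1(x) = 1 - x, \qquad
(k + 1)\,L_{k+1}(x) = (2k + 1 - x)\,L_k(x) - k\,L_{k-1}(x), \quad k \ge 1.
\]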
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
namespace at::native {
namespace {
const char legendre_polynomial_p_name[] = "legendre_polynomial_p_forward";
void legendre_polynomial_p_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "legendre_polynomial_p_hip", [&]() {
opmath_jitted_gpu_kernel_with_scalars<legendre_polynomial_p_name, scalar_t, scalar_t>(iterator, legendre_polynomial_p_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "legendre_polynomial_p_hip", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return legendre_polynomial_p_forward<scalar_t, true>(x, n);
});
});
#endif
} // legendre_polynomial_p_kernel_hip
} // namespace (anonymous)
REGISTER_DISPATCH(legendre_polynomial_p_stub, &legendre_polynomial_p_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
const char legendre_polynomial_p_name[] = "legendre_polynomial_p_forward";
void legendre_polynomial_p_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "legendre_polynomial_p_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<legendre_polynomial_p_name, scalar_t, scalar_t>(iterator, legendre_polynomial_p_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "legendre_polynomial_p_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return legendre_polynomial_p_forward<scalar_t, true>(x, n);
});
});
#endif
} // legendre_polynomial_p_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(legendre_polynomial_p_stub, &legendre_polynomial_p_kernel_cuda);
} // namespace at::native
### |
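The Legendre kernel is structurally identical to the Laguerre one; only the scalar forward function and the jiterator string change. As a rough illustration of what such a scalar path computes, here is a hypothetical standalone device helper that evaluates P_n(x) with Bonnet's recurrence. It is not ATen's legendre_polynomial_p_forward (which, for one, takes the degree as a floating-point scalar), just a sketch of the recurrence.

// Hypothetical helper, not part of ATen: evaluate the Legendre polynomial
// P_n(x) via Bonnet's recurrence (k+1) P_{k+1} = (2k+1) x P_k - k P_{k-1}.
template <typename T>
__device__ T legendre_p_sketch(T x, int n) {
  if (n == 0) return T(1);
  if (n == 1) return x;
  T p_prev = T(1);  // P_0(x)
  T p_curr = x;     // P_1(x)
  for (int k = 1; k < n; ++k) {
    const T p_next =
        ((T(2) * k + T(1)) * x * p_curr - T(k) * p_prev) / T(k + 1);
    p_prev = p_curr;
    p_curr = p_next;
  }
  return p_curr;
}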
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/OpMathType.h>
#include <c10/util/MathConstants.h>
// NOTE: HIP on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
void logaddexp_kernel_hip(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::BFloat16, ScalarType::Half,
iter.dtype(), "logaddexp_hip",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a_, scalar_t b_) -> scalar_t {
const auto a = static_cast<opmath_t>(a_);
const auto b = static_cast<opmath_t>(b_);
if (::isinf(a) && a == b) {
return a;
} else {
const auto m = ::max(a, b);
return m + ::log1p(::exp(-::abs(a - b)));
}
});
});
}
void logaddexp2_kernel_hip(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND(
ScalarType::BFloat16,
iter.dtype(), "logaddexp2_hip",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
const auto inv_log_2 = static_cast<opmath_t>(1.0 / c10::ln_2<double>);
gpu_kernel(iter, [inv_log_2] GPU_LAMBDA (scalar_t a_, scalar_t b_) -> scalar_t {
const auto a = static_cast<opmath_t>(a_);
const auto b = static_cast<opmath_t>(b_);
if (::isinf(a) && a == b) {
return a;
} else {
const auto m = ::max(a, b);
return m + ::log1p(::exp2(-::abs(a - b))) * inv_log_2;
}
});
});
}
REGISTER_DISPATCH(logaddexp_stub, &logaddexp_kernel_hip);
REGISTER_DISPATCH(logaddexp2_stub, &logaddexp2_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/OpMathType.h>
#include <c10/util/MathConstants.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
void logaddexp_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::BFloat16, ScalarType::Half,
iter.dtype(), "logaddexp_cuda",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a_, scalar_t b_) -> scalar_t {
const auto a = static_cast<opmath_t>(a_);
const auto b = static_cast<opmath_t>(b_);
if (::isinf(a) && a == b) {
return a;
} else {
const auto m = ::max(a, b);
return m + ::log1p(::exp(-::abs(a - b)));
}
});
});
}
void logaddexp2_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND(
ScalarType::BFloat16,
iter.dtype(), "logaddexp2_cuda",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
const auto inv_log_2 = static_cast<opmath_t>(1.0 / c10::ln_2<double>);
gpu_kernel(iter, [inv_log_2] GPU_LAMBDA (scalar_t a_, scalar_t b_) -> scalar_t {
const auto a = static_cast<opmath_t>(a_);
const auto b = static_cast<opmath_t>(b_);
if (::isinf(a) && a == b) {
return a;
} else {
const auto m = ::max(a, b);
return m + ::log1p(::exp2(-::abs(a - b))) * inv_log_2;
}
});
});
}
REGISTER_DISPATCH(logaddexp_stub, &logaddexp_kernel_cuda);
REGISTER_DISPATCH(logaddexp2_stub, &logaddexp2_kernel_cuda);
} // namespace at::native
### |
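Both kernels use the max-shifted form of the identity so neither exponential can overflow. The explicit isinf(a) && a == b branch exists because when both inputs are the same infinity, a - b is NaN and the shifted formula would return NaN instead of that infinity. In symbols, with m = max(a, b):

\[
\operatorname{logaddexp}(a, b) = \log\!\left(e^{a} + e^{b}\right)
  = m + \log\!\left(1 + e^{-\lvert a-b\rvert}\right),
\]
\[
\operatorname{logaddexp2}(a, b) = \log_{2}\!\left(2^{a} + 2^{b}\right)
  = m + \frac{\log\!\left(1 + 2^{-\lvert a-b\rvert}\right)}{\ln 2},
\]

which matches the m + ::log1p(::exp2(-::abs(a - b))) * inv_log_2 expression in the code, with inv_log_2 = 1 / ln 2.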
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char modified_bessel_i0_name[] = "modified_bessel_i0_forward";
void modified_bessel_i0_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_i0_hip", [&]() {
jitted_gpu_kernel<modified_bessel_i0_name, scalar_t, scalar_t, 1>(iterator, modified_bessel_i0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_i0_hip", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return modified_bessel_i0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_modified_bessel_i0_stub, &modified_bessel_i0_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char modified_bessel_i0_name[] = "modified_bessel_i0_forward";
void modified_bessel_i0_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_i0_cuda", [&]() {
jitted_gpu_kernel<modified_bessel_i0_name, scalar_t, scalar_t, 1>(iterator, modified_bessel_i0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_i0_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return modified_bessel_i0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_modified_bessel_i0_stub, &modified_bessel_i0_kernel_cuda);
} // namespace at::native
### |
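For reference, modified_bessel_i0_forward(a) computes the modified Bessel function of the first kind of order zero, whose defining power series is given below; production implementations (presumably including ATen's) evaluate it with polynomial or rational approximations rather than by summing this series directly.

\[
I_0(x) = \sum_{k=0}^{\infty} \frac{(x/2)^{2k}}{(k!)^{2}}
       = 1 + \frac{x^{2}}{4} + \frac{x^{4}}{64} + \cdots
\]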