hip | cuda
---|---|
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/rocm/reduction/all_impl.h"
#include <thrust/logical.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#ifdef _WIN32
#pragma warning(disable : 4244)
#endif
namespace onnxruntime {
namespace rocm {
__global__ void assign_true(bool* ptr) {
*ptr = true;
}
__global__ void assign_false(bool* ptr) {
*ptr = false;
}
template<>
void LaunchAllKernel(hipStream_t stream, const bool* data, const int size, bool* output) {
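// thrust::all_of runs the AND-reduction over the bool inputs on the given stream and returns the result to the host;
// a one-thread kernel then writes that result back to `output`, which lives in device memory.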
if (thrust::all_of(thrust::hip::par.on(stream), data, data + size, thrust::identity<bool>())) {
assign_true<<<1, 1, 0, stream>>>(output);
} else {
assign_false<<<1, 1, 0, stream>>>(output);
}
}
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/cuda/reduction/all_impl.h"
#include <thrust/logical.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#ifdef _WIN32
#pragma warning(disable : 4244)
#endif
namespace onnxruntime {
namespace cuda {
__global__ void assign_true(bool* ptr) {
*ptr = true;
}
__global__ void assign_false(bool* ptr) {
*ptr = false;
}
template<>
void LaunchAllKernel(cudaStream_t stream, const bool* data, const int size, bool* output) {
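// thrust::all_of runs the AND-reduction over the bool inputs on the given stream and returns the result to the host;
// a one-thread kernel then writes that result back to `output`, which lives in device memory.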
if (thrust::all_of(thrust::cuda::par.on(stream), data, data + size, thrust::identity<bool>())) {
assign_true<<<1, 1, 0, stream>>>(output);
} else {
assign_false<<<1, 1, 0, stream>>>(output);
}
}
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <hip/hip_runtime.h>
namespace onnxruntime {
namespace rocm {
template <typename T>
void LaunchAllKernel(hipStream_t stream, const T* data, const int size, bool* output);
}
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <cuda_runtime.h>
namespace onnxruntime {
namespace cuda {
template <typename T>
void LaunchAllKernel(cudaStream_t stream, const T* data, const int size, bool* output);
}
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <typename TIn, typename TOut>
class ReduceAllL2 final : public RocmKernel {
public:
ReduceAllL2(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <typename TIn, typename TOut>
class ReduceAllL2 final : public CudaKernel {
public:
ReduceAllL2(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <hip/hip_runtime.h>
#include "core/providers/rocm/multi_tensor/common.cuh"
namespace onnxruntime {
namespace rocm {
template <typename TIn, typename TOut>
struct MultiTensorReduceL2 {
void operator()(hipStream_t stream, ChunkGroup<1> chunk_group, TOut* output);
};
template <typename Tin, typename Tout>
void ScalarSqrt(hipStream_t stream, Tin* input, Tout* output);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <cuda_runtime.h>
#include "core/providers/cuda/multi_tensor/common.cuh"
namespace onnxruntime {
namespace cuda {
template <typename TIn, typename TOut>
struct MultiTensorReduceL2 {
void operator()(cudaStream_t stream, ChunkGroup<1> chunk_group, TOut* output);
};
template <typename Tin, typename Tout>
void ScalarSqrt(cudaStream_t stream, Tin* input, Tout* output);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/optional.h"
#include "core/providers/rocm/rocm_common.h"
#include "core/providers/rocm/reduction/reduction_ops.h"
#include "core/providers/rocm/reduction/reduction_functions.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
class ReduceSumTraining final : public ReduceKernel<true> {
public:
ReduceSumTraining(const OpKernelInfo& info) : ReduceKernel<true>(info) {
fast_reduction_ = true;
}
Status ComputeInternal(OpKernelContext* ctx) const override {
return ComputeImplEx<T>(ctx, MIOPEN_REDUCE_TENSOR_ADD);
}
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/optional.h"
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/reduction/reduction_ops.h"
#include "core/providers/cuda/reduction/reduction_functions.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
class ReduceSumTraining final : public ReduceKernel<true> {
public:
ReduceSumTraining(const OpKernelInfo& info) : ReduceKernel<true>(info) {
fast_reduction_ = true;
}
Status ComputeInternal(OpKernelContext* ctx) const override {
return ComputeImplEx<T>(ctx, CUDNN_REDUCE_TENSOR_ADD);
}
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
#include "core/providers/cpu/tensor/concatbase.h"
namespace onnxruntime {
namespace rocm {
class ConcatTraining final : public RocmKernel, public ConcatBase {
public:
ConcatTraining(const OpKernelInfo& info) : RocmKernel(info), ConcatBase(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
#include "core/providers/cpu/tensor/concatbase.h"
namespace onnxruntime {
namespace cuda {
class ConcatTraining final : public CudaKernel, public ConcatBase {
public:
ConcatTraining(const OpKernelInfo& info) : CudaKernel(info), ConcatBase(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <bool inputk>
class TopK final : public RocmKernel {
public:
TopK(const OpKernelInfo&);
Status ComputeInternal(OpKernelContext*) const override;
private:
int64_t axis_;
int64_t largest_;
int64_t sorted_;
mutable int64_t K_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <bool inputk>
class TopK final : public CudaKernel {
public:
TopK(const OpKernelInfo&);
Status ComputeInternal(OpKernelContext*) const override;
private:
int64_t axis_;
int64_t largest_;
int64_t sorted_;
mutable int64_t K_;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
class GatherElementsGrad final : public RocmKernel {
public:
GatherElementsGrad(const OpKernelInfo& info) : RocmKernel(info) {
info.GetAttrOrDefault("axis", &axis_, static_cast<int64_t>(0));
}
~GatherElementsGrad() = default;
Status ComputeInternal(OpKernelContext* context) const override;
private:
template <typename T>
struct ComputeImpl;
int64_t axis_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
class GatherElementsGrad final : public CudaKernel {
public:
GatherElementsGrad(const OpKernelInfo& info) : CudaKernel(info) {
info.GetAttrOrDefault("axis", &axis_, static_cast<int64_t>(0));
}
~GatherElementsGrad() = default;
Status ComputeInternal(OpKernelContext* context) const override;
private:
template <typename T>
struct ComputeImpl;
int64_t axis_;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
namespace onnxruntime {
namespace rocm {
struct GatherScatterElementsArgs;
template <typename T, typename TIndex>
Status GatherElementsGradImpl(hipStream_t stream, const TIndex* indices_data, const T* updates_data, T* output_data,
const GatherScatterElementsArgs& args);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
namespace onnxruntime {
namespace cuda {
struct GatherScatterElementsArgs;
template <typename T, typename TIndex>
Status GatherElementsGradImpl(cudaStream_t stream, const TIndex* indices_data, const T* updates_data, T* output_data,
const GatherScatterElementsArgs& args);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
class GatherGrad final : public RocmKernel {
public:
GatherGrad(const OpKernelInfo& info) : RocmKernel(info) {
ORT_ENFORCE(info.GetAttr<int64_t>("axis", &axis_).IsOK(), "Missing/Invalid 'axis' attribute value");
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
int64_t axis_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
class GatherGrad final : public CudaKernel {
public:
GatherGrad(const OpKernelInfo& info) : CudaKernel(info) {
ORT_ENFORCE(info.GetAttr<int64_t>("axis", &axis_).IsOK(), "Missing/Invalid 'axis' attribute value");
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
int64_t axis_;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/rocm/rocm_kernel.h"
#include "core/providers/rocm/shared_inc/rocm_utils.h"
#include "core/framework/stream_handles.h"
namespace onnxruntime {
namespace rocm {
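// Thin adapter that lets GatherGradImpl allocate temporary device buffers through the owning kernel on a given stream.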
class CudaScratchBufferAllocator {
public:
explicit CudaScratchBufferAllocator(const RocmKernel& kernel, Stream* stream) : kernel_{kernel}, stream_{stream} {
}
template <typename T>
IAllocatorUniquePtr<T> GetScratchBuffer(size_t count_or_bytes) const {
return kernel_.GetScratchBuffer<T>(count_or_bytes, stream_);
}
private:
const RocmKernel& kernel_;
Stream* stream_;
};
// unit for handling indexing and counting of gathered indices
using GatheredIndexIndex_t = int32_t;
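// Accumulates (scatter-adds) dY_data into dX_data at the positions given by dX_indices, i.e. the gradient of Gather.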
template <typename T, typename TIndex>
void GatherGradImpl(
hipStream_t stream,
const hipDeviceProp_t& prop,
const CudaScratchBufferAllocator& allocator,
const T* dY_data,
const TIndex* dX_indices,
const GatheredIndexIndex_t num_gathered_indices,
const int64_t gather_dimension_size,
const int64_t num_gathered_per_index,
const int64_t num_batches,
T* dX_data);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/cuda/cuda_kernel.h"
#include "core/providers/cuda/shared_inc/cuda_utils.h"
#include "core/framework/stream_handles.h"
namespace onnxruntime {
namespace cuda {
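// Thin adapter that lets GatherGradImpl allocate temporary device buffers through the owning kernel on a given stream.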
class CudaScratchBufferAllocator {
public:
explicit CudaScratchBufferAllocator(const CudaKernel& kernel, Stream* stream) : kernel_{kernel}, stream_{stream} {
}
template <typename T>
IAllocatorUniquePtr<T> GetScratchBuffer(size_t count_or_bytes) const {
return kernel_.GetScratchBuffer<T>(count_or_bytes, stream_);
}
private:
const CudaKernel& kernel_;
Stream* stream_;
};
// unit for handling indexing and counting of gathered indices
using GatheredIndexIndex_t = int32_t;
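// Accumulates (scatter-adds) dY_data into dX_data at the positions given by dX_indices, i.e. the gradient of Gather.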
template <typename T, typename TIndex>
void GatherGradImpl(
cudaStream_t stream,
const cudaDeviceProp& prop,
const CudaScratchBufferAllocator& allocator,
const T* dY_data,
const TIndex* dX_indices,
const GatheredIndexIndex_t num_gathered_indices,
const int64_t gather_dimension_size,
const int64_t num_gathered_per_index,
const int64_t num_batches,
T* dX_data);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/rocm/tensor/gather_nd.h"
namespace onnxruntime {
namespace rocm {
template <typename Tind>
class GatherNDGrad final : public GatherNDBase {
public:
GatherNDGrad(const OpKernelInfo& info) : GatherNDBase(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/cuda/tensor/gather_nd.h"
namespace onnxruntime {
namespace cuda {
template <typename Tind>
class GatherNDGrad final : public GatherNDBase {
public:
GatherNDGrad(const OpKernelInfo& info) : GatherNDBase(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifdef _WIN32
#pragma warning(disable : 4244)
#endif
#include "orttraining/training_ops/rocm/tensor/gather_nd_grad_impl.h"
#include "core/providers/rocm/cu_inc/common.cuh"
#include "core/providers/rocm/atomic/common.cuh"
namespace onnxruntime {
namespace rocm {
template <typename T>
__global__ void _GatherNDGradKernel(
const size_t num_slices,
const T* update_data,
T* output_data,
const size_t slice_size,
const int64_t* slice_offsets) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, num_slices * slice_size);
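// Each thread handles one update element: i / slice_size selects the slice (and its precomputed output offset),
// i % slice_size is the position within that slice; atomic_add handles indices that map to the same output location.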
uint64_t slice_offset = slice_offsets[i / slice_size];
size_t j = i % slice_size;
atomic_add(output_data + slice_offset + j, update_data[i]);
}
template <typename T>
void GatherNDGradImpl(
hipStream_t stream,
const size_t num_slices,
const void* update_data,
void* output_data,
const size_t slice_size,
const int64_t* input_slice_offsets_data) {
const unsigned int blocks_per_grid = static_cast<unsigned int>(CeilDiv(num_slices * slice_size, GridDim::maxThreadsPerBlock));
_GatherNDGradKernel<T><<<blocks_per_grid, GridDim::maxThreadsPerBlock, 0, stream>>>(
num_slices, static_cast<const T*>(update_data), static_cast<T*>(output_data), slice_size, input_slice_offsets_data);
}
#define SPECIALIZED_GRAD_IMPL(T) \
template void GatherNDGradImpl<T>(hipStream_t stream, const size_t num_slices, const void* update_data, void* output_data, const size_t slice_size, const int64_t* input_slice_offsets_data)
SPECIALIZED_GRAD_IMPL(float);
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
SPECIALIZED_GRAD_IMPL(half);
SPECIALIZED_GRAD_IMPL(double);
SPECIALIZED_GRAD_IMPL(BFloat16);
#endif
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifdef _WIN32
#pragma warning(disable : 4244)
#endif
#include "orttraining/training_ops/cuda/tensor/gather_nd_grad_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/atomic/common.cuh"
namespace onnxruntime {
namespace cuda {
template <typename T>
__global__ void _GatherNDGradKernel(
const size_t num_slices,
const T* update_data,
T* output_data,
const size_t slice_size,
const int64_t* slice_offsets) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, num_slices * slice_size);
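// Each thread handles one update element: i / slice_size selects the slice (and its precomputed output offset),
// i % slice_size is the position within that slice; atomic_add handles indices that map to the same output location.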
uint64_t slice_offset = slice_offsets[i / slice_size];
size_t j = i % slice_size;
atomic_add(output_data + slice_offset + j, update_data[i]);
}
template <typename T>
void GatherNDGradImpl(
cudaStream_t stream,
const size_t num_slices,
const void* update_data,
void* output_data,
const size_t slice_size,
const int64_t* input_slice_offsets_data) {
const unsigned int blocks_per_grid = static_cast<unsigned int>(CeilDiv(num_slices * slice_size, GridDim::maxThreadsPerBlock));
_GatherNDGradKernel<T><<<blocks_per_grid, GridDim::maxThreadsPerBlock, 0, stream>>>(
num_slices, static_cast<const T*>(update_data), static_cast<T*>(output_data), slice_size, input_slice_offsets_data);
}
#define SPECIALIZED_GRAD_IMPL(T) \
template void GatherNDGradImpl<T>(cudaStream_t stream, const size_t num_slices, const void* update_data, void* output_data, const size_t slice_size, const int64_t* input_slice_offsets_data)
SPECIALIZED_GRAD_IMPL(float);
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
SPECIALIZED_GRAD_IMPL(half);
SPECIALIZED_GRAD_IMPL(double);
SPECIALIZED_GRAD_IMPL(BFloat16);
#endif
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include <hip/hip_runtime.h>
namespace onnxruntime {
namespace rocm {
template <typename T>
void GatherNDGradImpl(
hipStream_t stream,
const size_t num_slices,
const void* update_data,
void* output_data,
const size_t slice_size,
const int64_t* input_slice_offsets_data);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include <cuda_runtime.h>
namespace onnxruntime {
namespace cuda {
template <typename T>
void GatherNDGradImpl(
cudaStream_t stream,
const size_t num_slices,
const void* update_data,
void* output_data,
const size_t slice_size,
const int64_t* input_slice_offsets_data);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/rocm/tensor/slice_grad.h"
#include "core/providers/cpu/tensor/utils.h"
#include "core/providers/rocm/tensor/slice_impl.h"
namespace onnxruntime {
namespace rocm {
ONNX_OPERATOR_KERNEL_EX(
SliceGrad,
kMSDomain,
1,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.InputMemoryType(OrtMemTypeCPUInput, 1)
.InputMemoryType(OrtMemTypeCPUInput, 2)
.InputMemoryType(OrtMemTypeCPUInput, 3)
.InputMemoryType(OrtMemTypeCPUInput, 4)
.InputMemoryType(OrtMemTypeCPUInput, 5)
.TypeConstraint("I", DataTypeImpl::GetTensorType<int64_t>())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.TypeConstraint("Tind", std::vector<MLDataType>{
DataTypeImpl::GetTensorType<int32_t>(),
DataTypeImpl::GetTensorType<int64_t>()}),
SliceGrad);
Tensor* GetOutputGradientTensor(OpKernelContext* ctx) {
const Tensor& shape = *ctx->Input<Tensor>(1);
const TensorShape data_shape(shape.template Data<int64_t>(), shape.Shape().Size());
return ctx->Output(0, data_shape);
}
const Tensor* SliceGrad::GetSlicedOrUnslicedTensor(OpKernelContext* ctx) const {
// The gradient computation logic is the same as the Slice op, except that the assignment from input tensor to
// output tensor is reversed. Hence the input tensor for the Slice op code (when used for gradient computation)
// is the output tensor of the gradient op, which has the same shape as the Slice op's input when used for
// slicing rather than gradient computation.
return GetOutputGradientTensor(ctx);
}
Status SliceGrad::FillInputVectors(OpKernelContext* ctx, TensorShapeVector& input_starts,
TensorShapeVector& input_ends, TensorShapeVector& input_axes,
TensorShapeVector& input_steps) const {
return FillVectorsFromInput(*ctx->Input<Tensor>(2), *ctx->Input<Tensor>(3), ctx->Input<Tensor>(4),
ctx->Input<Tensor>(5), input_starts, input_ends, input_axes, input_steps);
}
Status SliceGrad::CallSliceImp(size_t element_size, size_t dimension_count, const TArray<int64_t>& starts_buffer,
const TArray<int64_t>& steps_buffer, const TArray<int64_t>& input_strides,
const TArray<fast_divmod>& output_strides, OpKernelContext* ctx,
const TensorShape& output_shape) const {
Tensor* gradient_out_tensor = GetOutputGradientTensor(ctx);
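// Zero-initialize the output gradient, then let SliceImplGrad scatter the incoming gradient (input 0) into the sliced positions.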
HIP_RETURN_IF_ERROR(hipMemsetAsync(gradient_out_tensor->MutableDataRaw(), 0, gradient_out_tensor->SizeInBytes(), Stream(ctx)));
return SliceImplGrad(Stream(ctx),
element_size,
gsl::narrow_cast<int32_t>(dimension_count),
starts_buffer,
steps_buffer,
input_strides,
output_strides,
ctx->Input<Tensor>(0)->DataRaw(),
gradient_out_tensor->MutableDataRaw(),
output_shape.Size());
}
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/cuda/tensor/slice_grad.h"
#include "core/providers/cpu/tensor/utils.h"
#include "core/providers/cuda/tensor/slice_impl.h"
namespace onnxruntime {
namespace cuda {
ONNX_OPERATOR_KERNEL_EX(
SliceGrad,
kMSDomain,
1,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.InputMemoryType(OrtMemTypeCPUInput, 1)
.InputMemoryType(OrtMemTypeCPUInput, 2)
.InputMemoryType(OrtMemTypeCPUInput, 3)
.InputMemoryType(OrtMemTypeCPUInput, 4)
.InputMemoryType(OrtMemTypeCPUInput, 5)
.TypeConstraint("I", DataTypeImpl::GetTensorType<int64_t>())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.TypeConstraint("Tind", std::vector<MLDataType>{
DataTypeImpl::GetTensorType<int32_t>(),
DataTypeImpl::GetTensorType<int64_t>()}),
SliceGrad);
Tensor* GetOutputGradientTensor(OpKernelContext* ctx) {
const Tensor& shape = *ctx->Input<Tensor>(1);
const TensorShape data_shape(shape.template Data<int64_t>(), shape.Shape().Size());
return ctx->Output(0, data_shape);
}
const Tensor* SliceGrad::GetSlicedOrUnslicedTensor(OpKernelContext* ctx) const {
// The gradient computation logic is the same as the Slice op, except that the assignment from input tensor to
// output tensor is reversed. Hence the input tensor for the Slice op code (when used for gradient computation)
// is the output tensor of the gradient op, which has the same shape as the Slice op's input when used for
// slicing rather than gradient computation.
return GetOutputGradientTensor(ctx);
}
Status SliceGrad::FillInputVectors(OpKernelContext* ctx, TensorShapeVector& input_starts,
TensorShapeVector& input_ends, TensorShapeVector& input_axes,
TensorShapeVector& input_steps) const {
return FillVectorsFromInput(*ctx->Input<Tensor>(2), *ctx->Input<Tensor>(3), ctx->Input<Tensor>(4),
ctx->Input<Tensor>(5), input_starts, input_ends, input_axes, input_steps);
}
Status SliceGrad::CallSliceImp(size_t element_size, size_t dimension_count, const TArray<int64_t>& starts_buffer,
const TArray<int64_t>& steps_buffer, const TArray<int64_t>& input_strides,
const TArray<fast_divmod>& output_strides, OpKernelContext* ctx,
const TensorShape& output_shape) const {
Tensor* gradient_out_tensor = GetOutputGradientTensor(ctx);
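// Zero-initialize the output gradient, then let SliceImplGrad scatter the incoming gradient (input 0) into the sliced positions.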
CUDA_RETURN_IF_ERROR(cudaMemsetAsync(gradient_out_tensor->MutableDataRaw(), 0, gradient_out_tensor->SizeInBytes(), Stream(ctx)));
return SliceImplGrad(Stream(ctx),
element_size,
gsl::narrow_cast<int32_t>(dimension_count),
starts_buffer,
steps_buffer,
input_strides,
output_strides,
ctx->Input<Tensor>(0)->DataRaw(),
gradient_out_tensor->MutableDataRaw(),
output_shape.Size());
}
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/rocm/tensor/slice.h"
namespace onnxruntime {
namespace rocm {
class SliceGrad final : public Slice<true> {
public:
SliceGrad(const OpKernelInfo& info) : Slice(info) {}
private:
const Tensor* GetSlicedOrUnslicedTensor(OpKernelContext* ctx) const override;
Status FillInputVectors(OpKernelContext* ctx, TensorShapeVector& input_starts, TensorShapeVector& input_ends,
TensorShapeVector& input_axes, TensorShapeVector& input_steps) const override;
Status CallSliceImp(size_t element_size, size_t dimension_count, const TArray<int64_t>& starts_buffer,
const TArray<int64_t>& steps_buffer, const TArray<int64_t>& input_strides,
const TArray<fast_divmod>& output_strides, OpKernelContext* ctx, const TensorShape& output_shape)
const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/tensor/slice.h"
namespace onnxruntime {
namespace cuda {
class SliceGrad final : public Slice<true> {
public:
SliceGrad(const OpKernelInfo& info) : Slice(info) {}
private:
const Tensor* GetSlicedOrUnslicedTensor(OpKernelContext* ctx) const override;
Status FillInputVectors(OpKernelContext* ctx, TensorShapeVector& input_starts, TensorShapeVector& input_ends,
TensorShapeVector& input_axes, TensorShapeVector& input_steps) const override;
Status CallSliceImp(size_t element_size, size_t dimension_count, const TArray<int64_t>& starts_buffer,
const TArray<int64_t>& steps_buffer, const TArray<int64_t>& input_strides,
const TArray<fast_divmod>& output_strides, OpKernelContext* ctx, const TensorShape& output_shape)
const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
#include "core/providers/cpu/tensor/split.h"
#include "orttraining/training_ops/cpu/tensor/split.h"
namespace onnxruntime {
namespace rocm {
class SplitTraining final : public RocmKernel, public SplitBase {
public:
// ONNX Split from opset 13; does not support the uneven splits added in opset 18.
SplitTraining(const OpKernelInfo& info) : RocmKernel(info), SplitBase(info, 13) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
#include "core/providers/cpu/tensor/split.h"
#include "orttraining/training_ops/cpu/tensor/split.h"
namespace onnxruntime {
namespace cuda {
class SplitTraining final : public CudaKernel, public SplitBase {
public:
// ONNX Split from opset 13; does not support the uneven splits added in opset 18.
SplitTraining(const OpKernelInfo& info) : CudaKernel(info), SplitBase(info, 13) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
#include "core/providers/rocm/rocm_kernel.h"
#include "core/common/common.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
Status TopKImpl(const RocmKernel* kernel, Stream* ort_stream, const T* input_x, T* output_v, int64_t* output_i, const TArray<int64_t>& elem_nums, size_t size, int32_t axis, int64_t K, int64_t largest, int64_t sorted, int64_t N, int64_t dimension);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
#include "core/providers/cuda/cuda_kernel.h"
#include "core/common/common.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
Status TopKImpl(const CudaKernel* kernel, Stream* ort_stream, const T* input_x, T* output_v, int64_t* output_i, const TArray<int64_t>& elem_nums, size_t size, int32_t axis, int64_t K, int64_t largest, int64_t sorted, int64_t N, int64_t dimension);
} // namespace cuda
} // namespace onnxruntime
### |
#include "orttraining/training_ops/rocm/tensor/view.h"
namespace onnxruntime {
namespace rocm {
namespace {
constexpr int view_count_limit = 1024;
std::vector<std::pair<int, int>> GenerateAliasMapping() {
std::vector<std::pair<int, int>> alias_pairs{};
for (int i = 0; i < view_count_limit; ++i) {
alias_pairs.emplace_back(std::make_pair(0, i));
}
return alias_pairs;
}
std::vector<int> GenerateInputMemoryType() {
std::vector<int> input_indexes{};
for (int i = 1; i < 1 + view_count_limit; ++i) {
input_indexes.emplace_back(i);
}
return input_indexes;
}
}
ONNX_OPERATOR_KERNEL_EX(
View, kMSDomain, 1, kRocmExecutionProvider, (*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.TypeConstraint("shapes", DataTypeImpl::GetTensorType<int64_t>())
.InputMemoryType(OrtMemTypeCPUInput, GenerateInputMemoryType())
.Alias(GenerateAliasMapping()), View);
Status View::ComputeInternal(OpKernelContext* context) const {
const Tensor* X = context->Input<Tensor>(0);
size_t bytes_per_elem = X->DataType()->Size();
int view_count = context->InputCount() - 1;
std::vector<TensorShape> y_shapes(view_count);
std::vector<size_t> y_byte_offsets(view_count);
size_t byte_offset = 0;
for (int i = 0; i < view_count; ++i) {
const Tensor* shape_tensor = context->Input<Tensor>(i + 1);
if (shape_tensor->Shape().NumDimensions() != 1) {
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "A shape tensor must be a vector tensor, got ", shape_tensor->Shape().NumDimensions(), " dimensions");
}
size_t n_dims = static_cast<size_t>(shape_tensor->Shape()[0]);
const int64_t* shape_data = shape_tensor->template Data<int64_t>();
y_shapes[i] = TensorShape(shape_data, n_dims);
y_byte_offsets[i] = byte_offset;
byte_offset += y_shapes[i].Size() * bytes_per_elem;
}
if (byte_offset != X->SizeInBytes()) {
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "The input view shapes doesn't adds up to match input buffer size.");
}
const void* X_data = X->DataRaw();
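// For each requested view: if the output owns a separate buffer, copy the corresponding byte range from the input;
// otherwise the output aliases the input buffer and only its byte offset needs to be recorded.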
for (int i = 0; i < view_count; ++i) {
Tensor* Y = context->Output(i, y_shapes[i]);
if (Y != nullptr) {
if (X_data != Y->MutableDataRaw()) {
const void* source = static_cast<const char*>(X_data) + y_byte_offsets[i];
void* target = Y->MutableDataRaw();
HIP_RETURN_IF_ERROR(hipMemcpyAsync(target, source, Y->SizeInBytes(), hipMemcpyDeviceToDevice, Stream(context)));
} else {
Y->SetByteOffset(y_byte_offsets[i]);
}
}
}
return Status::OK();
}
} // namespace rocm
} // namespace onnxruntime
### |
#include "orttraining/training_ops/cuda/tensor/view.h"
namespace onnxruntime {
namespace cuda {
namespace {
constexpr int view_count_limit = 1024;
std::vector<std::pair<int, int>> GenerateAliasMapping() {
std::vector<std::pair<int, int>> alias_pairs{};
for (int i = 0; i < view_count_limit; ++i) {
alias_pairs.emplace_back(std::make_pair(0, i));
}
return alias_pairs;
}
std::vector<int> GenerateInputMemoryType() {
std::vector<int> input_indexes{};
for (int i = 1; i < 1 + view_count_limit; ++i) {
input_indexes.emplace_back(i);
}
return input_indexes;
}
}
ONNX_OPERATOR_KERNEL_EX(
View, kMSDomain, 1, kCudaExecutionProvider, (*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.TypeConstraint("shapes", DataTypeImpl::GetTensorType<int64_t>())
.InputMemoryType(OrtMemTypeCPUInput, GenerateInputMemoryType())
.Alias(GenerateAliasMapping()), View);
Status View::ComputeInternal(OpKernelContext* context) const {
const Tensor* X = context->Input<Tensor>(0);
size_t bytes_per_elem = X->DataType()->Size();
int view_count = context->InputCount() - 1;
std::vector<TensorShape> y_shapes(view_count);
std::vector<size_t> y_byte_offsets(view_count);
size_t byte_offset = 0;
for (int i = 0; i < view_count; ++i) {
const Tensor* shape_tensor = context->Input<Tensor>(i + 1);
if (shape_tensor->Shape().NumDimensions() != 1) {
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "A shape tensor must be a vector tensor, got ", shape_tensor->Shape().NumDimensions(), " dimensions");
}
size_t n_dims = static_cast<size_t>(shape_tensor->Shape()[0]);
const int64_t* shape_data = shape_tensor->template Data<int64_t>();
y_shapes[i] = TensorShape(shape_data, n_dims);
y_byte_offsets[i] = byte_offset;
byte_offset += y_shapes[i].Size() * bytes_per_elem;
}
if (byte_offset != X->SizeInBytes()) {
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "The input view shapes doesn't adds up to match input buffer size.");
}
const void* X_data = X->DataRaw();
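// For each requested view: if the output owns a separate buffer, copy the corresponding byte range from the input;
// otherwise the output aliases the input buffer and only its byte offset needs to be recorded.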
for (int i = 0; i < view_count; ++i) {
Tensor* Y = context->Output(i, y_shapes[i]);
if (Y != nullptr) {
if (X_data != Y->MutableDataRaw()) {
const void* source = static_cast<const char*>(X_data) + y_byte_offsets[i];
void* target = Y->MutableDataRaw();
CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(target, source, Y->SizeInBytes(), cudaMemcpyDeviceToDevice, Stream(context)));
} else {
Y->SetByteOffset(y_byte_offsets[i]);
}
}
}
return Status::OK();
}
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
class View final : public RocmKernel {
public:
View(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
class View final : public CudaKernel {
public:
View(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifdef ENABLE_TRAINING_TORCH_INTEROP
#include "core/providers/shared_library/provider_api.h"
#include "orttraining/core/framework/torch/refcount_tracker.h"
#include "orttraining/training_ops/rocm/torch/torch_custom_function_kernel.h"
#include "core/framework/ort_value.h"
using namespace onnxruntime::language_interop_ops::torch;
namespace onnxruntime {
namespace rocm {
ONNX_OPERATOR_KERNEL_EX(
PythonOp,
kMSDomain,
1,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.OutputMemoryType(OrtMemTypeCPUOutput, 0)
.TypeConstraint("T", DataTypeImpl::AllTensorAndSequenceTensorTypes())
.TypeConstraint("TInt64", DataTypeImpl::GetTensorType<int64_t>()),
PythonOp);
ONNX_OPERATOR_KERNEL_EX(
PythonOpGrad,
kMSDomain,
1,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.InputMemoryType(OrtMemTypeCPUInput, 0)
.TypeConstraint("T", DataTypeImpl::AllTensorAndSequenceTensorTypes())
.TypeConstraint("TInt64", DataTypeImpl::GetTensorType<int64_t>()),
PythonOpGrad);
Status PythonOp::ComputeInternal(OpKernelContext* context) const {
void* diff_ctx = nullptr;
std::vector<OrtValue> returned_ortvalues;
RunForward(context, &diff_ctx, returned_ortvalues);
SetOutputs(context, diff_ctx, returned_ortvalues);
RefCountTracker::GetInstance().DumpDetails("Forward Kernel Completed");
return Status::OK();
}
Status PythonOpGrad::ComputeInternal(OpKernelContext* context) const {
std::vector<OrtValue> returned_ortvalues;
RunBackward(context, returned_ortvalues);
SetOutputs(context, returned_ortvalues);
RefCountTracker::GetInstance().DumpDetails("Backward Kernel Completed");
return Status::OK();
}
} // namespace rocm
} // namespace onnxruntime
#endif
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifdef ENABLE_TRAINING_TORCH_INTEROP
#include "core/providers/shared_library/provider_api.h"
#include "orttraining/core/framework/torch/refcount_tracker.h"
#include "orttraining/training_ops/cuda/torch/torch_custom_function_kernel.h"
#include "core/framework/ort_value.h"
using namespace onnxruntime::language_interop_ops::torch;
namespace onnxruntime {
namespace cuda {
ONNX_OPERATOR_KERNEL_EX(
PythonOp,
kMSDomain,
1,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.OutputMemoryType(OrtMemTypeCPUOutput, 0)
.TypeConstraint("T", DataTypeImpl::AllTensorAndSequenceTensorTypes())
.TypeConstraint("TInt64", DataTypeImpl::GetTensorType<int64_t>()),
PythonOp);
ONNX_OPERATOR_KERNEL_EX(
PythonOpGrad,
kMSDomain,
1,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.InputMemoryType(OrtMemTypeCPUInput, 0)
.TypeConstraint("T", DataTypeImpl::AllTensorAndSequenceTensorTypes())
.TypeConstraint("TInt64", DataTypeImpl::GetTensorType<int64_t>()),
PythonOpGrad);
Status PythonOp::ComputeInternal(OpKernelContext* context) const {
void* diff_ctx = nullptr;
std::vector<OrtValue> returned_ortvalues;
RunForward(context, &diff_ctx, returned_ortvalues);
SetOutputs(context, diff_ctx, returned_ortvalues);
RefCountTracker::GetInstance().DumpDetails("Forward Kernel Completed");
return Status::OK();
}
Status PythonOpGrad::ComputeInternal(OpKernelContext* context) const {
std::vector<OrtValue> returned_ortvalues;
RunBackward(context, returned_ortvalues);
SetOutputs(context, returned_ortvalues);
RefCountTracker::GetInstance().DumpDetails("Backward Kernel Completed");
return Status::OK();
}
} // namespace cuda
} // namespace onnxruntime
#endif
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifdef ENABLE_TRAINING_TORCH_INTEROP
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
#include "orttraining/training_ops/cpu/torch/torch_custom_function_kernel_base.h"
namespace onnxruntime {
namespace rocm {
// PyTorch's torch.autograd.Function.apply(...) wrapper.
class PythonOp final : public contrib::PythonOpBase, public RocmKernel {
public:
PythonOp(const OpKernelInfo& info) : contrib::PythonOpBase(info), RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
// PyTorch's torch.autograd.Function.backward(...) wrapper.
class PythonOpGrad final : public contrib::PythonOpGradBase, public RocmKernel {
public:
PythonOpGrad(const OpKernelInfo& info) : contrib::PythonOpGradBase(info), RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
#endif
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifdef ENABLE_TRAINING_TORCH_INTEROP
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
#include "orttraining/training_ops/cpu/torch/torch_custom_function_kernel_base.h"
namespace onnxruntime {
namespace cuda {
// PyTorch's torch.autograd.Function.apply(...) wrapper.
class PythonOp final : public contrib::PythonOpBase, public CudaKernel {
public:
PythonOp(const OpKernelInfo& info) : contrib::PythonOpBase(info), CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
// PyTorch's torch.autograd.Function.backward(...) wrapper.
class PythonOpGrad final : public contrib::PythonOpGradBase, public CudaKernel {
public:
PythonOpGrad(const OpKernelInfo& info) : contrib::PythonOpGradBase(info), CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
#endif
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#define TOPK_IMPL_TYPE MLFloat16
#include "topk_impl.cuh"
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#define TOPK_IMPL_TYPE MLFloat16
#include "topk_impl.cuh"
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#define TOPK_IMPL_TYPE float
#include "topk_impl.cuh"
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#define TOPK_IMPL_TYPE float
#include "topk_impl.cuh"
### |
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/cu_inc/common.cuh"
namespace onnxruntime {
namespace rocm {
#ifdef USE_ROCM
constexpr int kElementsPerThread = 2;
constexpr int kThreadsPerBlock = 512;
#else
constexpr int kElementsPerThread = GridDim::maxElementsPerThread;
constexpr int kThreadsPerBlock = GridDim::maxThreadsPerBlock;
#endif
template <typename T, typename FuncT>
__global__ void ElementwiseKernel(T* output_data, const FuncT functor, HIP_LONG N) {
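// Each block covers kElementsPerThread * kThreadsPerBlock elements; every thread computes kElementsPerThread values
// (strided by kThreadsPerBlock) into registers first, then writes them out in a second pass.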
HIP_LONG start = kElementsPerThread * kThreadsPerBlock * blockIdx.x + threadIdx.x;
T value[kElementsPerThread];
HIP_LONG id = start;
#pragma unroll
for (int i = 0; i < kElementsPerThread; ++i) {
if (id < N) {
value[i] = functor(id);
id += kThreadsPerBlock;
}
}
id = start;
#pragma unroll
for (int i = 0; i < kElementsPerThread; ++i) {
if (id < N) {
output_data[id] = value[i];
id += kThreadsPerBlock;
}
}
}
template <typename T, typename FuncT>
void LaunchElementwiseKernel(hipStream_t stream, T* output_data, const FuncT& functor, size_t output_size) {
if (output_size == 0) return;
HIP_LONG N = static_cast<HIP_LONG>(output_size);
int blocksPerGrid = CeilDiv(N, kThreadsPerBlock * kElementsPerThread);
ElementwiseKernel<T, FuncT><<<blocksPerGrid, kThreadsPerBlock, 0, stream>>>(output_data, functor, N);
}
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cu_inc/common.cuh"
namespace onnxruntime {
namespace cuda {
#ifdef USE_ROCM
constexpr int kElementsPerThread = 2;
constexpr int kThreadsPerBlock = 512;
#else
constexpr int kElementsPerThread = GridDim::maxElementsPerThread;
constexpr int kThreadsPerBlock = GridDim::maxThreadsPerBlock;
#endif
template <typename T, typename FuncT>
__global__ void ElementwiseKernel(T* output_data, const FuncT functor, CUDA_LONG N) {
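// Each block covers kElementsPerThread * kThreadsPerBlock elements; every thread computes kElementsPerThread values
// (strided by kThreadsPerBlock) into registers first, then writes them out in a second pass.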
CUDA_LONG start = kElementsPerThread * kThreadsPerBlock * blockIdx.x + threadIdx.x;
T value[kElementsPerThread];
CUDA_LONG id = start;
#pragma unroll
for (int i = 0; i < kElementsPerThread; ++i) {
if (id < N) {
value[i] = functor(id);
id += kThreadsPerBlock;
}
}
id = start;
#pragma unroll
for (int i = 0; i < kElementsPerThread; ++i) {
if (id < N) {
output_data[id] = value[i];
id += kThreadsPerBlock;
}
}
}
template <typename T, typename FuncT>
void LaunchElementwiseKernel(cudaStream_t stream, T* output_data, const FuncT& functor, size_t output_size) {
if (output_size == 0) return;
CUDA_LONG N = static_cast<CUDA_LONG>(output_size);
int blocksPerGrid = CeilDiv(N, kThreadsPerBlock * kElementsPerThread);
ElementwiseKernel<T, FuncT><<<blocksPerGrid, kThreadsPerBlock, 0, stream>>>(output_data, functor, N);
}
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#define TOPK_IMPL_TYPE double
#include "topk_impl.cuh"
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#define TOPK_IMPL_TYPE double
#include "topk_impl.cuh"
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#define TOPK_IMPL_TYPE int32_t
#include "topk_impl.cuh"
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#define TOPK_IMPL_TYPE int32_t
#include "topk_impl.cuh"
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#define TOPK_IMPL_TYPE int64_t
#include "topk_impl.cuh"
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#define TOPK_IMPL_TYPE int64_t
#include "topk_impl.cuh"
### |
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
struct UnaryElementwisePreparation {
const Tensor* input_tensor = nullptr;
Tensor* output_tensor = nullptr;
};
class UnaryElementwise : public RocmKernel {
protected:
UnaryElementwise(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext*) const override {
return Status(common::ONNXRUNTIME, common::FAIL);
}
Status Prepare(OpKernelContext* context, UnaryElementwisePreparation* p) const;
};
template <typename T>
class Abs final : public UnaryElementwise {
public:
Abs(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Neg final : public UnaryElementwise {
public:
Neg(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Floor final : public UnaryElementwise {
public:
Floor(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Ceil final : public UnaryElementwise {
public:
Ceil(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Reciprocal final : public UnaryElementwise {
public:
Reciprocal(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Sqrt final : public UnaryElementwise {
public:
Sqrt(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Log final : public UnaryElementwise {
public:
Log(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Exp final : public UnaryElementwise {
public:
Exp(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Erf final : public UnaryElementwise {
public:
Erf(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Not final : public UnaryElementwise {
public:
Not(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Round final : public UnaryElementwise {
public:
Round(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Sin final : public UnaryElementwise {
public:
Sin(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Cos final : public UnaryElementwise {
public:
Cos(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
struct UnaryElementwisePreparation {
const Tensor* input_tensor = nullptr;
Tensor* output_tensor = nullptr;
};
class UnaryElementwise : public CudaKernel {
protected:
UnaryElementwise(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext*) const override {
return Status(common::ONNXRUNTIME, common::FAIL);
}
Status Prepare(OpKernelContext* context, UnaryElementwisePreparation* p) const;
};
template <typename T>
class Abs final : public UnaryElementwise {
public:
Abs(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Neg final : public UnaryElementwise {
public:
Neg(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Floor final : public UnaryElementwise {
public:
Floor(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Ceil final : public UnaryElementwise {
public:
Ceil(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Reciprocal final : public UnaryElementwise {
public:
Reciprocal(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Sqrt final : public UnaryElementwise {
public:
Sqrt(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Log final : public UnaryElementwise {
public:
Log(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Exp final : public UnaryElementwise {
public:
Exp(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Erf final : public UnaryElementwise {
public:
Erf(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Not final : public UnaryElementwise {
public:
Not(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Round final : public UnaryElementwise {
public:
Round(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Sin final : public UnaryElementwise {
public:
Sin(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class Cos final : public UnaryElementwise {
public:
Cos(const OpKernelInfo& info) : UnaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <functional>
#include <vector>
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
using InputTensorVector = std::vector<std::reference_wrapper<const Tensor>>;
template <typename VariadicElementwiseOpTag,
typename... SupportedElementTypes>
class VariadicElementwiseOp : public RocmKernel {
public:
VariadicElementwiseOp(const OpKernelInfo& info) : RocmKernel(info) {}
private:
Status ComputeInternal(OpKernelContext* context) const override;
template <typename T>
struct NoBroadcastBatchImplDispatchTarget {
Status operator()(hipStream_t stream, const InputTensorVector& inputs, Tensor& output) const;
};
template <typename T>
struct BinaryImplDispatchTarget {
Status operator()(hipStream_t stream, const Tensor& lhs, const Tensor& rhs, Tensor& output) const;
};
template <typename T>
struct GeneralImplDispatchTarget {
Status operator()(hipStream_t stream, const InputTensorVector& inputs, Tensor& output) const;
};
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <functional>
#include <vector>
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
using InputTensorVector = std::vector<std::reference_wrapper<const Tensor>>;
template <typename VariadicElementwiseOpTag,
typename... SupportedElementTypes>
class VariadicElementwiseOp : public CudaKernel {
public:
VariadicElementwiseOp(const OpKernelInfo& info) : CudaKernel(info) {}
private:
Status ComputeInternal(OpKernelContext* context) const override;
template <typename T>
struct NoBroadcastBatchImplDispatchTarget {
Status operator()(cudaStream_t stream, const InputTensorVector& inputs, Tensor& output) const;
};
template <typename T>
struct BinaryImplDispatchTarget {
Status operator()(cudaStream_t stream, const Tensor& lhs, const Tensor& rhs, Tensor& output) const;
};
template <typename T>
struct GeneralImplDispatchTarget {
Status operator()(cudaStream_t stream, const InputTensorVector& inputs, Tensor& output) const;
};
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <cstdint>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
namespace onnxruntime {
namespace rocm {
template <typename T, typename VariadicElementwiseOpTag>
void Impl_General(
hipStream_t stream,
int32_t output_rank_or_simple_broadcast,
const TArray<int64_t>* lhs_padded_strides,
const T* lhs_data,
const TArray<int64_t>* rhs_padded_strides,
const T* rhs_data,
const TArray<fast_divmod>* fdm_output_strides,
const fast_divmod& fdm_H,
const fast_divmod& fdm_C,
T* output_data,
size_t count);
constexpr int32_t k_max_input_batch_size = 8;
template <typename T>
using InputBatchArray = TArray<const T*, k_max_input_batch_size>;
template <typename T, typename VariadicElementwiseOpTag>
void Impl_NoBroadcastInputBatch(
hipStream_t stream,
InputBatchArray<T> input_data_batch,
T* output_data,
size_t count);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <cstdint>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
namespace onnxruntime {
namespace cuda {
template <typename T, typename VariadicElementwiseOpTag>
void Impl_General(
cudaStream_t stream,
int32_t output_rank_or_simple_broadcast,
const TArray<int64_t>* lhs_padded_strides,
const T* lhs_data,
const TArray<int64_t>* rhs_padded_strides,
const T* rhs_data,
const TArray<fast_divmod>* fdm_output_strides,
const fast_divmod& fdm_H,
const fast_divmod& fdm_C,
T* output_data,
size_t count);
constexpr int32_t k_max_input_batch_size = 8;
template <typename T>
using InputBatchArray = TArray<const T*, k_max_input_batch_size>;
template <typename T, typename VariadicElementwiseOpTag>
void Impl_NoBroadcastInputBatch(
cudaStream_t stream,
InputBatchArray<T> input_data_batch,
T* output_data,
size_t count);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
namespace onnxruntime {
namespace rocm {
namespace variadic_elementwise_ops {
struct Sum {};
struct Min {};
struct Max {};
} // namespace variadic_elementwise_ops
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
namespace onnxruntime {
namespace cuda {
namespace variadic_elementwise_ops {
struct Sum {};
struct Min {};
struct Max {};
} // namespace variadic_elementwise_ops
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
#include "core/providers/rocm/miopen_common.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
class BatchNorm final : public RocmKernel {
public:
BatchNorm(const OpKernelInfo& op_kernel_info)
: RocmKernel{op_kernel_info},
miopen_batch_norm_mode_(miopenBNSpatial),
momentum_(0.9) {
float tmp_epsilon;
ORT_ENFORCE(op_kernel_info.GetAttr<float>("epsilon", &tmp_epsilon).IsOK());
epsilon_ = ClampMiopenBatchNormEpsilon(static_cast<double>(tmp_epsilon));
// spatial or not
int64_t tmp_spatial;
if (op_kernel_info.GetAttr<int64_t>("spatial", &tmp_spatial).IsOK()) {
spatial_ = tmp_spatial;
}
if (spatial_ == 0) {
miopen_batch_norm_mode_ = miopenBNPerActivation;
}
float tmp_momentum;
if (op_kernel_info.GetAttr<float>("momentum", &tmp_momentum).IsOK()) {
momentum_ = static_cast<double>(tmp_momentum);
}
is_training_mode_ = (op_kernel_info.GetAttrOrDefault<int64_t>("training_mode", 0) == 1);
const auto& node = op_kernel_info.node();
auto opset = node.SinceVersion();
// batch norm opset 14 (or higher) is not implemented for training mode
ORT_ENFORCE(!(is_training_mode_ && opset >= 14), "Training mode does not support BN opset 14 (or higher) yet.");
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
double epsilon_;
int64_t spatial_ = 1; // default as per spec
miopenBatchNormMode_t miopen_batch_norm_mode_;
double momentum_;
  bool is_training_mode_ = false;  // default as per spec
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
#include "core/providers/cuda/cudnn_common.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
class BatchNorm final : public CudaKernel {
public:
BatchNorm(const OpKernelInfo& op_kernel_info)
: CudaKernel{op_kernel_info},
cudnn_batch_norm_mode_(CUDNN_BATCHNORM_SPATIAL),
momentum_(0.9) {
float tmp_epsilon;
ORT_ENFORCE(op_kernel_info.GetAttr<float>("epsilon", &tmp_epsilon).IsOK());
epsilon_ = ClampCudnnBatchNormEpsilon(static_cast<double>(tmp_epsilon));
// spatial or not
int64_t tmp_spatial;
if (op_kernel_info.GetAttr<int64_t>("spatial", &tmp_spatial).IsOK()) {
spatial_ = tmp_spatial;
}
if (spatial_ == 0) {
cudnn_batch_norm_mode_ = CUDNN_BATCHNORM_PER_ACTIVATION;
}
float tmp_momentum;
if (op_kernel_info.GetAttr<float>("momentum", &tmp_momentum).IsOK()) {
momentum_ = static_cast<double>(tmp_momentum);
}
is_training_mode_ = (op_kernel_info.GetAttrOrDefault<int64_t>("training_mode", 0) == 1);
const auto& node = op_kernel_info.node();
auto opset = node.SinceVersion();
// batch norm opset 14 (or higher) is not implemented for training mode
ORT_ENFORCE(!(is_training_mode_ && opset >= 14), "Training mode does not support BN opset 14 (or higher) yet.");
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
double epsilon_;
int64_t spatial_ = 1; // default as per spec
cudnnBatchNormMode_t cudnn_batch_norm_mode_;
double momentum_;
  bool is_training_mode_ = false;  // default as per spec
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
#include "core/providers/common.h"
#include "core/framework/random_generator.h"
namespace onnxruntime {
namespace rocm {
template <bool UseBitmask>
class Dropout final : public RocmKernel {
public:
Dropout(const OpKernelInfo& info) : RocmKernel(info) {
int64_t seed = 0;
if (info.GetAttr<int64_t>("seed", &seed).IsOK()) {
generator_ = std::make_unique<PhiloxGenerator>(static_cast<uint64_t>(seed));
}
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
mutable std::unique_ptr<PhiloxGenerator> generator_;
static constexpr float default_ratio_ = 0.5f;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
#include "core/providers/common.h"
#include "core/framework/random_generator.h"
namespace onnxruntime {
namespace cuda {
template <bool UseBitmask>
class Dropout final : public CudaKernel {
public:
Dropout(const OpKernelInfo& info) : CudaKernel(info) {
int64_t seed = 0;
if (info.GetAttr<int64_t>("seed", &seed).IsOK()) {
generator_ = std::make_unique<PhiloxGenerator>(static_cast<uint64_t>(seed));
}
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
mutable std::unique_ptr<PhiloxGenerator> generator_;
static constexpr float default_ratio_ = 0.5f;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/framework/random_generator.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
void DropoutKernelImpl(const hipDeviceProp_t& prop, hipStream_t stream, const int64_t N,
const int64_t mask_element_count, const float ratio, PhiloxGenerator& generator, const T* X_data,
T* Y_data, void* mask_data, bool use_bitmask);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/framework/random_generator.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
void DropoutKernelImpl(const cudaDeviceProp& prop, cudaStream_t stream, const int64_t N,
const int64_t mask_element_count, const float ratio, PhiloxGenerator& generator, const T* X_data,
T* Y_data, void* mask_data, bool use_bitmask);
} // namespace cuda
} // namespace onnxruntime
### |
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
#include "core/providers/rocm/cu_inc/common.cuh"
namespace onnxruntime {
namespace rocm {
template <typename InT, typename OutT, typename FuncT, int NumThreadsPerBlock, int NumElementsPerThread>
__global__ void _UnaryElementWise(
const InT* input_data,
OutT* output_data,
const FuncT functor,
HIP_LONG N) {
HIP_LONG start = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x;
InT value[NumElementsPerThread];
HIP_LONG id = start;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
value[i] = input_data[id];
id += NumThreadsPerBlock;
}
}
id = start;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
output_data[id] = functor(value[i]);
id += NumThreadsPerBlock;
}
}
}
template <typename InT, typename OutT, typename FuncT>
void UnaryElementWiseImpl(
hipStream_t stream,
const InT* input_data,
OutT* output_data,
const FuncT& func,
size_t count) {
if (count == 0) // special case where there's a dim value of 0 in the shape
return;
int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread));
HIP_LONG N = static_cast<HIP_LONG>(count);
_UnaryElementWise<InT, OutT, FuncT, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>
<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
input_data,
output_data,
func,
N);
}
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
#include "core/providers/cuda/cu_inc/common.cuh"
namespace onnxruntime {
namespace cuda {
template <typename InT, typename OutT, typename FuncT, int NumThreadsPerBlock, int NumElementsPerThread>
__global__ void _UnaryElementWise(
const InT* input_data,
OutT* output_data,
const FuncT functor,
CUDA_LONG N) {
CUDA_LONG start = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x;
InT value[NumElementsPerThread];
CUDA_LONG id = start;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
value[i] = input_data[id];
id += NumThreadsPerBlock;
}
}
id = start;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
output_data[id] = functor(value[i]);
id += NumThreadsPerBlock;
}
}
}
template <typename InT, typename OutT, typename FuncT>
void UnaryElementWiseImpl(
cudaStream_t stream,
const InT* input_data,
OutT* output_data,
const FuncT& func,
size_t count) {
if (count == 0) // special case where there's a dim value of 0 in the shape
return;
int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread));
CUDA_LONG N = static_cast<CUDA_LONG>(count);
_UnaryElementWise<InT, OutT, FuncT, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>
<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
input_data,
output_data,
func,
N);
}
} // namespace cuda
} // namespace onnxruntime
### |
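For reference, a minimal usage sketch of the launcher above, assuming the surrounding onnxruntime CUDA headers are available; the OpSquare functor and the LaunchSquare wrapper are hypothetical, while UnaryElementWiseImpl and GridDim come from the code shown:
// Hypothetical unary functor; _UnaryElementWise calls functor(value) once per element.
struct OpSquare {
  __device__ __forceinline__ float operator()(const float& x) const { return x * x; }
};
// d_in and d_out are device buffers holding `count` floats each.
inline void LaunchSquare(cudaStream_t stream, const float* d_in, float* d_out, size_t count) {
  // The launcher derives the grid size from GridDim::maxThreadsPerBlock and
  // GridDim::maxElementsPerThread, then runs _UnaryElementWise on `stream`.
  onnxruntime::cuda::UnaryElementWiseImpl(stream, d_in, d_out, OpSquare(), count);
}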
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
#include "core/providers/rocm/miopen_common.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
class InstanceNorm final : public RocmKernel {
public:
InstanceNorm(const OpKernelInfo& op_kernel_info);
Status ComputeInternal(OpKernelContext* p_op_kernel_context) const override;
private:
double epsilon_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
#include "core/providers/cuda/cudnn_common.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
class InstanceNorm final : public CudaKernel {
public:
InstanceNorm(const OpKernelInfo& op_kernel_info);
Status ComputeInternal(OpKernelContext* p_op_kernel_context) const override;
private:
double epsilon_;
};
} // namespace cuda
} // namespace onnxruntime
### |
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/rocm/cu_inc/common.cuh"
#include "instance_norm_impl.h"
namespace onnxruntime {
namespace rocm {
template <typename T1, typename T2>
__global__ void _InstanceNormKernel(
const T1* __restrict__ input_data,
const T1* __restrict__ scale,
const T1* __restrict__ bias,
const T2* __restrict__ mean,
const T2* __restrict__ variance,
const double variance_correction,
const double epsilon,
const fast_divmod fdm_HW,
const fast_divmod fdm_C,
T1* __restrict__ output_data,
const HIP_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
int nc = fdm_HW.div(id);
int n, c;
fdm_C.divmod(nc, n, c);
  // Y = scale * (X - mean) / sqrt(variance * variance_correction + epsilon) + B
output_data[id] = scale[c] * (input_data[id] - (T1)mean[nc]) / _Sqrt((T1)variance[nc] * (T1)variance_correction + (T1)epsilon) + bias[c];
}
template <typename T1, typename T2>
void InstanceNormImpl(
hipStream_t stream,
const T1* input_data,
const T1* scale,
const T1* bias,
const T2* mean,
const T2* variance,
const double variance_correction,
const double epsilon,
const fast_divmod& fdm_HW,
const fast_divmod& fdm_C,
T1* output_data,
size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
_InstanceNormKernel<T1, T2><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
input_data, scale, bias, mean, variance, variance_correction, epsilon, fdm_HW, fdm_C, output_data, (HIP_LONG)N);
}
#define SPECIALIZED_IMPL(T1, T2) \
template void InstanceNormImpl<T1, T2>(hipStream_t stream, const T1* input_data, const T1* scale, const T1* bias, const T2* mean, const T2* stddev, const double variance_correction, const double epsilon, const fast_divmod& fdm_HW, const fast_divmod& fdm_C, T1* output_data, size_t count);
SPECIALIZED_IMPL(float, float)
SPECIALIZED_IMPL(double, double)
// When the input data type is float16, the means and variances will flow in as float32 (special case)
SPECIALIZED_IMPL(half, float)
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cu_inc/common.cuh"
#include "instance_norm_impl.h"
namespace onnxruntime {
namespace cuda {
template <typename T1, typename T2>
__global__ void _InstanceNormKernel(
const T1* __restrict__ input_data,
const T1* __restrict__ scale,
const T1* __restrict__ bias,
const T2* __restrict__ mean,
const T2* __restrict__ variance,
const double variance_correction,
const double epsilon,
const fast_divmod fdm_HW,
const fast_divmod fdm_C,
T1* __restrict__ output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
int nc = fdm_HW.div(id);
int n, c;
fdm_C.divmod(nc, n, c);
  // Y = scale * (X - mean) / sqrt(variance * variance_correction + epsilon) + B
output_data[id] = scale[c] * (input_data[id] - (T1)mean[nc]) / _Sqrt((T1)variance[nc] * (T1)variance_correction + (T1)epsilon) + bias[c];
}
template <typename T1, typename T2>
void InstanceNormImpl(
cudaStream_t stream,
const T1* input_data,
const T1* scale,
const T1* bias,
const T2* mean,
const T2* variance,
const double variance_correction,
const double epsilon,
const fast_divmod& fdm_HW,
const fast_divmod& fdm_C,
T1* output_data,
size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
_InstanceNormKernel<T1, T2><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
input_data, scale, bias, mean, variance, variance_correction, epsilon, fdm_HW, fdm_C, output_data, (CUDA_LONG)N);
}
#define SPECIALIZED_IMPL(T1, T2) \
template void InstanceNormImpl<T1, T2>(cudaStream_t stream, const T1* input_data, const T1* scale, const T1* bias, const T2* mean, const T2* stddev, const double variance_correction, const double epsilon, const fast_divmod& fdm_HW, const fast_divmod& fdm_C, T1* output_data, size_t count);
SPECIALIZED_IMPL(float, float)
SPECIALIZED_IMPL(double, double)
// When the input data type is float16, the means and variances will flow in as float32 (special case)
SPECIALIZED_IMPL(half, float)
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/shared_inc/fast_divmod.h"
namespace onnxruntime {
namespace rocm {
template <typename T1, typename T2>
void InstanceNormImpl(
hipStream_t stream,
const T1* input_data,
const T1* scale,
const T1* bias,
const T2* mean,
const T2* variance,
const double variance_correction,
const double epsilon,
const fast_divmod& fdm_HW,
const fast_divmod& fdm_C,
T1* output_data,
size_t count);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/shared_inc/fast_divmod.h"
namespace onnxruntime {
namespace cuda {
template <typename T1, typename T2>
void InstanceNormImpl(
cudaStream_t stream,
const T1* input_data,
const T1* scale,
const T1* bias,
const T2* mean,
const T2* variance,
const double variance_correction,
const double epsilon,
const fast_divmod& fdm_HW,
const fast_divmod& fdm_C,
T1* output_data,
size_t count);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
using namespace onnxruntime::rocm;
// NOTE: This was originally a contrib op with 3 type constraints. The ONNX spec merges 'T' and 'V'.
// the kernel is templatized on all three for backwards compatibility, but in ONNX usage T == V.
template <typename T, typename U, typename V, bool simplified>
class LayerNorm final : public RocmKernel {
public:
LayerNorm(const OpKernelInfo& op_kernel_info);
Status ComputeInternal(OpKernelContext* ctx) const override;
private:
int64_t axis_;
double epsilon_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
using namespace onnxruntime::cuda;
// NOTE: This was originally a contrib op with 3 type constraints. The ONNX spec merges 'T' and 'V'.
// the kernel is templatized on all three for backwards compatibility, but in ONNX usage T == V.
template <typename T, typename U, typename V, bool simplified>
class LayerNorm final : public CudaKernel {
public:
LayerNorm(const OpKernelInfo& op_kernel_info);
Status ComputeInternal(OpKernelContext* ctx) const override;
private:
int64_t axis_;
double epsilon_;
};
} // namespace cuda
} // namespace onnxruntime
### |
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
// Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
// NVIDIA/apex is licensed under the
// BSD 3 - Clause "New" or "Revised" License
//
/* Modifications Copyright (c) Microsoft. */
#pragma once
#include "core/providers/rocm/rocm_common.h"
namespace onnxruntime {
namespace rocm {
template <typename T, typename U, typename V, bool simplified>
void HostApplyLayerNorm(
const hipDeviceProp_t& prop,
hipStream_t stream,
V* output,
U* mean,
U* invvar,
const T* input,
int n1,
int n2,
double epsilon,
const V* gamma,
const V* beta,
const T* skip = nullptr,
const T* bias = nullptr,
T* skip_input_bias_add_output = nullptr);
} // namespace rocm
} // namespace onnxruntime
### |
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
// Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
// NVIDIA/apex is licensed under the
// BSD 3 - Clause "New" or "Revised" License
//
/* Modifications Copyright (c) Microsoft. */
#pragma once
#include "core/providers/cuda/cuda_common.h"
namespace onnxruntime {
namespace cuda {
template <typename T, typename U, typename V, bool simplified>
void HostApplyLayerNorm(
const cudaDeviceProp& prop,
cudaStream_t stream,
V* output,
U* mean,
U* invvar,
const T* input,
int n1,
int n2,
double epsilon,
const V* gamma,
const V* beta,
const T* skip = nullptr,
const T* bias = nullptr,
T* skip_input_bias_add_output = nullptr);
} // namespace cuda
} // namespace onnxruntime
### |
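As a hedged illustration of the declaration above: a sketch of a plain (non-simplified) float LayerNorm launch with no skip/bias fusion. The buffer names and the wrapper are hypothetical, n1 is taken to be the number of rows and n2 the size of the normalized axis, and epsilon 1e-5 is only an example value:
// Minimal sketch: full LayerNorm (simplified == false) over float tensors.
inline void LaunchLayerNormSketch(const cudaDeviceProp& prop, cudaStream_t stream,
                                  float* Y, float* mean, float* inv_std_var,
                                  const float* X, int n1, int n2,
                                  const float* gamma, const float* beta) {
  // skip, bias and skip_input_bias_add_output keep their nullptr defaults.
  onnxruntime::cuda::HostApplyLayerNorm<float, float, float, false>(
      prop, stream, Y, mean, inv_std_var, X, n1, n2, /*epsilon=*/1e-5, gamma, beta);
}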
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
#include "core/providers/rocm/miopen_common.h"
namespace onnxruntime {
namespace rocm {
class MiopenLRNDescriptor final {
public:
MiopenLRNDescriptor();
~MiopenLRNDescriptor();
Status Set(uint32_t N, double alpha, double beta, double K);
operator miopenLRNDescriptor_t() const { return desc_; }
private:
miopenLRNDescriptor_t desc_;
};
template <typename T>
class LRN : public RocmKernel {
public:
LRN(const OpKernelInfo& info);
Status ComputeInternal(OpKernelContext* p_op_kernel_context) const override;
private:
MiopenLRNDescriptor norm_desc_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
#include "core/providers/cuda/cudnn_common.h"
namespace onnxruntime {
namespace cuda {
class CudnnLRNDescriptor final {
public:
CudnnLRNDescriptor();
~CudnnLRNDescriptor();
Status Set(uint32_t N, double alpha, double beta, double K);
operator cudnnLRNDescriptor_t() const { return desc_; }
private:
cudnnLRNDescriptor_t desc_;
};
template <typename T>
class LRN : public CudaKernel {
public:
LRN(const OpKernelInfo& info);
Status ComputeInternal(OpKernelContext* p_op_kernel_context) const override;
private:
CudnnLRNDescriptor norm_desc_;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <vector>
#include "core/framework/tensor_shape.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
void MaxPoolWithIndex(
hipStream_t stream,
const TensorShape& input_shape,
const TensorShape& output_shape,
const gsl::span<const int64_t>& kernel_shape,
const gsl::span<const int64_t>& stride_shape,
const gsl::span<const int64_t>& pads,
const gsl::span<const int64_t>& dilations,
int64_t storage_order,
const T* p_input,
T* p_output,
int64_t* p_indices);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <vector>
#include "core/framework/tensor_shape.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
void MaxPoolWithIndex(
cudaStream_t stream,
const TensorShape& input_shape,
const TensorShape& output_shape,
const gsl::span<const int64_t>& kernel_shape,
const gsl::span<const int64_t>& stride_shape,
const gsl::span<const int64_t>& pads,
const gsl::span<const int64_t>& dilations,
int64_t storage_order,
const T* p_input,
T* p_output,
int64_t* p_indices);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "shrink.h"
#include "shrink_impl.h"
#include "core/providers/common.h"
using namespace std;
namespace onnxruntime {
namespace rocm {
#define SHRINK_REGISTER_KERNEL(T) \
ONNX_OPERATOR_TYPED_KERNEL_EX( \
Shrink, \
kOnnxDomain, \
9, \
T, \
kRocmExecutionProvider, \
(*KernelDefBuilder::Create()) \
.MayInplace(0, 0) \
.TypeConstraint("T", DataTypeImpl::GetTensorType<T>()), \
Shrink<T>);
template <typename T>
Status Shrink<T>::ComputeInternal(OpKernelContext* p_op_kernel_context) const {
typedef typename ToHipType<T>::MappedType HipT;
const Tensor* X = p_op_kernel_context->Input<Tensor>(0);
const auto* x_data = reinterpret_cast<const HipT*>(X->Data<T>());
const TensorShape& x_shape = X->Shape();
const size_t x_size = static_cast<size_t>(x_shape.Size());
Tensor* Y = p_op_kernel_context->Output(0, x_shape);
auto* y_data = reinterpret_cast<HipT*>(Y->MutableData<T>());
ShrinkImpl<HipT>(Stream(p_op_kernel_context), x_data, bias_, lambd_, y_data, x_size);
return Status::OK();
}
SHRINK_REGISTER_KERNEL(float)
SHRINK_REGISTER_KERNEL(double)
SHRINK_REGISTER_KERNEL(MLFloat16)
SHRINK_REGISTER_KERNEL(uint8_t)
SHRINK_REGISTER_KERNEL(int8_t)
SHRINK_REGISTER_KERNEL(uint16_t)
SHRINK_REGISTER_KERNEL(int16_t)
SHRINK_REGISTER_KERNEL(uint32_t)
SHRINK_REGISTER_KERNEL(int32_t)
SHRINK_REGISTER_KERNEL(uint64_t)
SHRINK_REGISTER_KERNEL(int64_t)
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "shrink.h"
#include "shrink_impl.h"
#include "core/providers/common.h"
using namespace std;
namespace onnxruntime {
namespace cuda {
#define SHRINK_REGISTER_KERNEL(T) \
ONNX_OPERATOR_TYPED_KERNEL_EX( \
Shrink, \
kOnnxDomain, \
9, \
T, \
kCudaExecutionProvider, \
(*KernelDefBuilder::Create()) \
.MayInplace(0, 0) \
.TypeConstraint("T", DataTypeImpl::GetTensorType<T>()), \
Shrink<T>);
template <typename T>
Status Shrink<T>::ComputeInternal(OpKernelContext* p_op_kernel_context) const {
typedef typename ToCudaType<T>::MappedType CudaT;
const Tensor* X = p_op_kernel_context->Input<Tensor>(0);
const auto* x_data = reinterpret_cast<const CudaT*>(X->Data<T>());
const TensorShape& x_shape = X->Shape();
const size_t x_size = static_cast<size_t>(x_shape.Size());
Tensor* Y = p_op_kernel_context->Output(0, x_shape);
auto* y_data = reinterpret_cast<CudaT*>(Y->MutableData<T>());
ShrinkImpl<CudaT>(Stream(p_op_kernel_context), x_data, bias_, lambd_, y_data, x_size);
return Status::OK();
}
SHRINK_REGISTER_KERNEL(float)
SHRINK_REGISTER_KERNEL(double)
SHRINK_REGISTER_KERNEL(MLFloat16)
SHRINK_REGISTER_KERNEL(uint8_t)
SHRINK_REGISTER_KERNEL(int8_t)
SHRINK_REGISTER_KERNEL(uint16_t)
SHRINK_REGISTER_KERNEL(int16_t)
SHRINK_REGISTER_KERNEL(uint32_t)
SHRINK_REGISTER_KERNEL(int32_t)
SHRINK_REGISTER_KERNEL(uint64_t)
SHRINK_REGISTER_KERNEL(int64_t)
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
class Shrink final : public RocmKernel {
public:
Shrink(const OpKernelInfo& info) : RocmKernel(info) {
float bias_temp;
// if the attribute exists, use the value
if (info.GetAttr<float>("bias", &bias_temp).IsOK())
bias_ = bias_temp;
float lambd_temp;
// if the attribute exists, use the value
if (info.GetAttr<float>("lambd", &lambd_temp).IsOK())
lambd_ = lambd_temp;
}
  Status ComputeInternal(OpKernelContext* p_op_kernel_context) const override;
private:
float bias_ = 0.0f; // default as per spec
float lambd_ = 0.5f; // default as per spec
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
class Shrink final : public CudaKernel {
public:
Shrink(const OpKernelInfo& info) : CudaKernel(info) {
float bias_temp;
// if the attribute exists, use the value
if (info.GetAttr<float>("bias", &bias_temp).IsOK())
bias_ = bias_temp;
float lambd_temp;
// if the attribute exists, use the value
if (info.GetAttr<float>("lambd", &lambd_temp).IsOK())
lambd_ = lambd_temp;
}
  Status ComputeInternal(OpKernelContext* p_op_kernel_context) const override;
private:
float bias_ = 0.0f; // default as per spec
float lambd_ = 0.5f; // default as per spec
};
} // namespace cuda
} // namespace onnxruntime
### |
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/rocm/cu_inc/common.cuh"
#include "shrink_impl.h"
namespace onnxruntime {
namespace rocm {
// Generic implementation of Shrink
template <typename T>
__global__ void _ShrinkKernel(
const T* input_data,
const float bias,
const float lambda,
T* output_data,
const HIP_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
T x = input_data[id];
if (x < -lambda) {
output_data[id] = (T)(x + bias);
} else if (x > lambda) {
output_data[id] = (T)(x - bias);
} else {
output_data[id] = (T)0;
}
}
// Specialized implementation for 'half' type
// the idea is to convert 'half' data to 'float' first,
// do the operation and convert result back to 'half'
template <>
__global__ void _ShrinkKernel(
const half* input_data,
const float bias,
const float lambda,
half* output_data,
const HIP_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
half x = input_data[id];
if ((float)x < -lambda) {
output_data[id] = half((float)x + bias);
} else if ((float)x > lambda) {
output_data[id] = half((float)x - bias);
} else {
output_data[id] = (half)0;
}
}
template <typename T>
void ShrinkImpl(
hipStream_t stream,
const T* input_data,
const float bias,
const float lambda,
T* output_data,
size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
_ShrinkKernel<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
input_data, bias, lambda, output_data, (HIP_LONG)N);
}
#define SPECIALIZED_IMPL(T) \
template void ShrinkImpl<T>(hipStream_t stream, const T* input_data, const float bias, const float lambda, T* output_data, size_t N);
SPECIALIZED_IMPL(float)
SPECIALIZED_IMPL(double)
SPECIALIZED_IMPL(half)
SPECIALIZED_IMPL(uint8_t)
SPECIALIZED_IMPL(int8_t)
SPECIALIZED_IMPL(uint16_t)
SPECIALIZED_IMPL(int16_t)
SPECIALIZED_IMPL(uint32_t)
SPECIALIZED_IMPL(int32_t)
SPECIALIZED_IMPL(uint64_t)
SPECIALIZED_IMPL(int64_t)
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cu_inc/common.cuh"
#include "shrink_impl.h"
namespace onnxruntime {
namespace cuda {
// Generic implementation of Shrink
template <typename T>
__global__ void _ShrinkKernel(
const T* input_data,
const float bias,
const float lambda,
T* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
T x = input_data[id];
if (x < -lambda) {
output_data[id] = (T)(x + bias);
} else if (x > lambda) {
output_data[id] = (T)(x - bias);
} else {
output_data[id] = (T)0;
}
}
// Specialized implementation for 'half' type
// the idea is to convert 'half' data to 'float' first,
// do the operation and convert result back to 'half'
template <>
__global__ void _ShrinkKernel(
const half* input_data,
const float bias,
const float lambda,
half* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
half x = input_data[id];
if ((float)x < -lambda) {
output_data[id] = half((float)x + bias);
} else if ((float)x > lambda) {
output_data[id] = half((float)x - bias);
} else {
output_data[id] = (half)0;
}
}
template <typename T>
void ShrinkImpl(
cudaStream_t stream,
const T* input_data,
const float bias,
const float lambda,
T* output_data,
size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
_ShrinkKernel<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
input_data, bias, lambda, output_data, (CUDA_LONG)N);
}
#define SPECIALIZED_IMPL(T) \
template void ShrinkImpl<T>(cudaStream_t stream, const T* input_data, const float bias, const float lambda, T* output_data, size_t N);
SPECIALIZED_IMPL(float)
SPECIALIZED_IMPL(double)
SPECIALIZED_IMPL(half)
SPECIALIZED_IMPL(uint8_t)
SPECIALIZED_IMPL(int8_t)
SPECIALIZED_IMPL(uint16_t)
SPECIALIZED_IMPL(int16_t)
SPECIALIZED_IMPL(uint32_t)
SPECIALIZED_IMPL(int32_t)
SPECIALIZED_IMPL(uint64_t)
SPECIALIZED_IMPL(int64_t)
} // namespace cuda
} // namespace onnxruntime
### |
#include "hip/hip_runtime.h"
#pragma once
#include "core/providers/rocm/cu_inc/common.cuh"
#include "core/providers/rocm/shared_inc/rocm_utils.h"
namespace onnxruntime {
namespace rocm {
template <
typename T, typename Func,
int32_t max_input_batch_size, int32_t num_elements_per_thread>
__global__ void VariadicElementWiseNoBroadcastInputBatchKernel(
Func func,
size_t N,
TArray<const T*, max_input_batch_size> inputs,
T* output) {
const size_t base_idx = num_elements_per_thread * blockDim.x * blockIdx.x + threadIdx.x;
T inputs_buffer[num_elements_per_thread][max_input_batch_size];
int32_t element_count;
size_t element_idx;
#pragma unroll
for (element_count = 0, element_idx = base_idx;
element_count < num_elements_per_thread;
++element_count, element_idx += blockDim.x) {
if (element_idx < N) {
#pragma unroll
for (int32_t input_batch_idx = 0; input_batch_idx < max_input_batch_size; ++input_batch_idx) {
if (input_batch_idx < inputs.Size()) {
inputs_buffer[element_count][input_batch_idx] = inputs[input_batch_idx][element_idx];
}
}
}
}
#pragma unroll
for (element_count = 0, element_idx = base_idx;
element_count < num_elements_per_thread;
++element_count, element_idx += blockDim.x) {
if (element_idx < N) {
// first and second inputs
T output_value = func(
inputs_buffer[element_count][0], inputs_buffer[element_count][1]);
// remaining inputs
#pragma unroll
for (int32_t input_batch_idx = 2; input_batch_idx < max_input_batch_size; ++input_batch_idx) {
if (input_batch_idx < inputs.Size()) {
output_value = func(output_value, inputs_buffer[element_count][input_batch_idx]);
}
}
output[element_idx] = output_value;
}
}
}
// assumptions:
// - inputs.Size() > 1 && inputs.Size() <= max_input_batch_size
// - inputs and output have N elements
template <typename T, typename Func, int32_t max_input_batch_size>
void VariadicElementWiseNoBroadcastInputBatchImpl(
hipStream_t stream,
Func func,
size_t N,
TArray<const T*, max_input_batch_size> inputs,
T* output) {
constexpr int32_t elements_per_thread = GridDim::maxElementsPerThread;
constexpr int32_t threads_per_block = GridDim::maxThreadsPerBlock;
const int32_t blocks_per_grid = static_cast<int32_t>(CeilDiv(N, elements_per_thread * threads_per_block));
VariadicElementWiseNoBroadcastInputBatchKernel<T, Func, max_input_batch_size, elements_per_thread>
<<<blocks_per_grid, threads_per_block, 0, stream>>>(func, N, inputs, output);
}
} // namespace rocm
} // namespace onnxruntime
### |
#pragma once
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/shared_inc/cuda_utils.h"
namespace onnxruntime {
namespace cuda {
template <
typename T, typename Func,
int32_t max_input_batch_size, int32_t num_elements_per_thread>
__global__ void VariadicElementWiseNoBroadcastInputBatchKernel(
Func func,
size_t N,
TArray<const T*, max_input_batch_size> inputs,
T* output) {
const size_t base_idx = num_elements_per_thread * blockDim.x * blockIdx.x + threadIdx.x;
T inputs_buffer[num_elements_per_thread][max_input_batch_size];
int32_t element_count;
size_t element_idx;
#pragma unroll
for (element_count = 0, element_idx = base_idx;
element_count < num_elements_per_thread;
++element_count, element_idx += blockDim.x) {
if (element_idx < N) {
#pragma unroll
for (int32_t input_batch_idx = 0; input_batch_idx < max_input_batch_size; ++input_batch_idx) {
if (input_batch_idx < inputs.Size()) {
inputs_buffer[element_count][input_batch_idx] = inputs[input_batch_idx][element_idx];
}
}
}
}
#pragma unroll
for (element_count = 0, element_idx = base_idx;
element_count < num_elements_per_thread;
++element_count, element_idx += blockDim.x) {
if (element_idx < N) {
// first and second inputs
T output_value = func(
inputs_buffer[element_count][0], inputs_buffer[element_count][1]);
// remaining inputs
#pragma unroll
for (int32_t input_batch_idx = 2; input_batch_idx < max_input_batch_size; ++input_batch_idx) {
if (input_batch_idx < inputs.Size()) {
output_value = func(output_value, inputs_buffer[element_count][input_batch_idx]);
}
}
output[element_idx] = output_value;
}
}
}
// assumptions:
// - inputs.Size() > 1 && inputs.Size() <= max_input_batch_size
// - inputs and output have N elements
template <typename T, typename Func, int32_t max_input_batch_size>
void VariadicElementWiseNoBroadcastInputBatchImpl(
cudaStream_t stream,
Func func,
size_t N,
TArray<const T*, max_input_batch_size> inputs,
T* output) {
constexpr int32_t elements_per_thread = GridDim::maxElementsPerThread;
constexpr int32_t threads_per_block = GridDim::maxThreadsPerBlock;
const int32_t blocks_per_grid = static_cast<int32_t>(CeilDiv(N, elements_per_thread * threads_per_block));
VariadicElementWiseNoBroadcastInputBatchKernel<T, Func, max_input_batch_size, elements_per_thread>
<<<blocks_per_grid, threads_per_block, 0, stream>>>(func, N, inputs, output);
}
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
namespace onnxruntime {
namespace rocm {
template <typename T>
void ShrinkImpl(
hipStream_t stream,
const T* input_data,
const float bias,
const float lambda,
T* output_data,
size_t count);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
namespace onnxruntime {
namespace cuda {
template <typename T>
void ShrinkImpl(
cudaStream_t stream,
const T* input_data,
const float bias,
const float lambda,
T* output_data,
size_t count);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifdef ENABLE_NVTX_PROFILE
#include "nvtx_profile.h"
#include "core/common/common.h"
#include <nvToolsExt.h>
#include <nvToolsExtCuda.h>
namespace onnxruntime {
namespace profile {
void NvtxRangeCreator::BeginImpl() {
// enable only for debug builds because this function is for profiling only.
nvtxEventAttributes_t eventAttrib;
eventAttrib.version = NVTX_VERSION;
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
eventAttrib.colorType = NVTX_COLOR_ARGB;
eventAttrib.color = static_cast<uint32_t>(color_);
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
eventAttrib.message.ascii = message_.c_str();
range_id_ = nvtxRangeStartEx(&eventAttrib);
}
void NvtxRangeCreator::EndImpl() {
// enable only for debug builds because this function is for profiling only.
nvtxRangeEnd(range_id_);
}
void NvtxNestedRangeCreator::BeginImpl() {
// enable only for debug builds because this function is for profiling only.
nvtxEventAttributes_t eventAttrib;
eventAttrib.version = NVTX_VERSION;
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
eventAttrib.colorType = NVTX_COLOR_ARGB;
eventAttrib.color = static_cast<uint32_t>(color_);
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
eventAttrib.message.ascii = message_.c_str();
nvtxRangePushEx(&eventAttrib);
}
void NvtxNestedRangeCreator::EndImpl() {
// enable only for debug builds because this function is for profiling only.
nvtxRangePop();
}
void NvtxMarkerCreator::Mark() {
// enable only for debug builds because this function is for profiling only.
nvtxEventAttributes_t eventAttrib;
eventAttrib.version = NVTX_VERSION;
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
eventAttrib.colorType = NVTX_COLOR_ARGB;
eventAttrib.color = static_cast<uint32_t>(color_);
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
eventAttrib.message.ascii = message_.c_str();
nvtxMarkEx(&eventAttrib);
}
} // namespace profile
} // namespace onnxruntime
#endif
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifdef ENABLE_NVTX_PROFILE
#include "nvtx_profile.h"
#include "core/common/common.h"
#include <nvToolsExt.h>
#include <nvToolsExtCuda.h>
namespace onnxruntime {
namespace profile {
void NvtxRangeCreator::BeginImpl() {
// enable only for debug builds because this function is for profiling only.
nvtxEventAttributes_t eventAttrib;
eventAttrib.version = NVTX_VERSION;
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
eventAttrib.colorType = NVTX_COLOR_ARGB;
eventAttrib.color = static_cast<uint32_t>(color_);
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
eventAttrib.message.ascii = message_.c_str();
range_id_ = nvtxRangeStartEx(&eventAttrib);
}
void NvtxRangeCreator::EndImpl() {
// enable only for debug builds because this function is for profiling only.
nvtxRangeEnd(range_id_);
}
void NvtxNestedRangeCreator::BeginImpl() {
// enable only for debug builds because this function is for profiling only.
nvtxEventAttributes_t eventAttrib;
eventAttrib.version = NVTX_VERSION;
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
eventAttrib.colorType = NVTX_COLOR_ARGB;
eventAttrib.color = static_cast<uint32_t>(color_);
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
eventAttrib.message.ascii = message_.c_str();
nvtxRangePushEx(&eventAttrib);
}
void NvtxNestedRangeCreator::EndImpl() {
// enable only for debug builds because this function is for profiling only.
nvtxRangePop();
}
void NvtxMarkerCreator::Mark() {
// enable only for debug builds because this function is for profiling only.
nvtxEventAttributes_t eventAttrib;
eventAttrib.version = NVTX_VERSION;
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
eventAttrib.colorType = NVTX_COLOR_ARGB;
eventAttrib.color = static_cast<uint32_t>(color_);
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
eventAttrib.message.ascii = message_.c_str();
nvtxMarkEx(&eventAttrib);
}
} // namespace profile
} // namespace onnxruntime
#endif
### |
#ifdef ENABLE_NVTX_PROFILE
#pragma once
#include <cinttypes>
#include <cstdlib>
#include <iostream>
#include <stdexcept>
#include <string>
#include "core/common/common.h"
namespace onnxruntime {
namespace profile {
enum class Color : uint32_t {
  Black = 0x00000000,
  Red = 0x00ff0000,
  DarkGreen = 0x00009900,
  Green = 0x0000ff00,
  LightGreen = 0x00ccffcc,
  Blue = 0x000000ff,
  Amber = 0x00ffbf00,
  LightAmber = 0x00fff2cc,
  White = 0x00ffffff,
  Cyan = 0x0000ffff,
  Magenta = 0x00ff00ff,
  Yellow = 0x00ffff00,
};
class RangeCreatorBase {
public:
RangeCreatorBase(const std::string message, const Color color)
: message_(message), color_(color), is_begin_called_(false), is_end_called_(false){};
~RangeCreatorBase() {
if (!is_begin_called_) {
std::cerr << "Begin must be called once." << std::endl;
}
if (!is_end_called_) {
std::cerr << "End must be called once." << std::endl;
}
}
void Begin() {
ORT_ENFORCE(!is_begin_called_, "Begin cannot be called more than once.");
ORT_ENFORCE(!is_end_called_, "Begin cannot be called after calling End.");
BeginImpl();
is_begin_called_ = true;
}
void End() {
ORT_ENFORCE(is_begin_called_, "End must be called after calling Begin.");
ORT_ENFORCE(!is_end_called_, "End cannot be called more than once.");
EndImpl();
is_end_called_ = true;
}
bool IsBeginCalled() const {
return is_begin_called_;
}
bool IsEndCalled() const {
return is_end_called_;
}
virtual void BeginImpl() = 0;
virtual void EndImpl() = 0;
protected:
const std::string message_;
const Color color_;
bool is_begin_called_;
bool is_end_called_;
};
class NvtxRangeCreator final : public RangeCreatorBase {
public:
NvtxRangeCreator(const std::string message, const Color color)
: RangeCreatorBase(message, color){};
void BeginImpl() override;
void EndImpl() override;
private:
uint64_t range_id_;
};
class NvtxNestedRangeCreator final : public RangeCreatorBase {
public:
NvtxNestedRangeCreator(const std::string message, const Color color)
: RangeCreatorBase(message, color){};
void BeginImpl() override;
void EndImpl() override;
};
class NvtxMarkerCreator final {
public:
NvtxMarkerCreator(const std::string message, const Color color)
: message_(message), color_(color){};
void Mark();
private:
const std::string message_;
const Color color_;
};
}
}
#endif
### |
#ifdef ENABLE_NVTX_PROFILE
#pragma once
#include <cinttypes>
#include <cstdlib>
#include <iostream>
#include <stdexcept>
#include <string>
#include "core/common/common.h"
namespace onnxruntime {
namespace profile {
enum class Color : uint32_t {
  Black = 0x00000000,
  Red = 0x00ff0000,
  DarkGreen = 0x00009900,
  Green = 0x0000ff00,
  LightGreen = 0x00ccffcc,
  Blue = 0x000000ff,
  Amber = 0x00ffbf00,
  LightAmber = 0x00fff2cc,
  White = 0x00ffffff,
  Cyan = 0x0000ffff,
  Magenta = 0x00ff00ff,
  Yellow = 0x00ffff00,
};
class RangeCreatorBase {
public:
RangeCreatorBase(const std::string message, const Color color)
: message_(message), color_(color), is_begin_called_(false), is_end_called_(false){};
~RangeCreatorBase() {
if (!is_begin_called_) {
std::cerr << "Begin must be called once." << std::endl;
}
if (!is_end_called_) {
std::cerr << "End must be called once." << std::endl;
}
}
void Begin() {
ORT_ENFORCE(!is_begin_called_, "Begin cannot be called more than once.");
ORT_ENFORCE(!is_end_called_, "Begin cannot be called after calling End.");
BeginImpl();
is_begin_called_ = true;
}
void End() {
ORT_ENFORCE(is_begin_called_, "End must be called after calling Begin.");
ORT_ENFORCE(!is_end_called_, "End cannot be called more than once.");
EndImpl();
is_end_called_ = true;
}
bool IsBeginCalled() const {
return is_begin_called_;
}
bool IsEndCalled() const {
return is_end_called_;
}
virtual void BeginImpl() = 0;
virtual void EndImpl() = 0;
protected:
const std::string message_;
const Color color_;
bool is_begin_called_;
bool is_end_called_;
};
class NvtxRangeCreator final : public RangeCreatorBase {
public:
NvtxRangeCreator(const std::string message, const Color color)
: RangeCreatorBase(message, color){};
void BeginImpl() override;
void EndImpl() override;
private:
uint64_t range_id_;
};
class NvtxNestedRangeCreator final : public RangeCreatorBase {
public:
NvtxNestedRangeCreator(const std::string message, const Color color)
: RangeCreatorBase(message, color){};
void BeginImpl() override;
void EndImpl() override;
};
class NvtxMarkerCreator final {
public:
NvtxMarkerCreator(const std::string message, const Color color)
: message_(message), color_(color){};
void Mark();
private:
const std::string message_;
const Color color_;
};
}
}
#endif
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <thread>
#include <string>
#include <unordered_map>
#include "core/platform/ort_mutex.h"
#ifdef ENABLE_NVTX_PROFILE
namespace onnxruntime {
namespace profile {
// Singleton class of managing global NVTX profiling information.
class Context {
public:
static Context& GetInstance() {
static Context instance_;
return instance_;
}
// Return tag for the specified thread.
// If the thread's tag doesn't exist, this function returns an empty string.
std::string GetThreadTagOrDefault(const std::thread::id& thread_id) {
const std::lock_guard<OrtMutex> lock(mtx_);
return thread_tag_[thread_id];
}
// Set tag for the specified thread.
void SetThreadTag(
const std::thread::id& thread_id, const std::string& tag) {
const std::lock_guard<OrtMutex> lock(mtx_);
thread_tag_[thread_id] = tag;
}
private:
Context() = default;
~Context() = default;
Context(const Context&) = delete;
Context& operator=(const Context&) = delete;
// map from thread's id to its human-readable tag.
std::unordered_map<std::thread::id, std::string> thread_tag_;
OrtMutex mtx_;
};
} // namespace profile
} // namespace onnxruntime
#endif
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <thread>
#include <string>
#include <unordered_map>
#include "core/platform/ort_mutex.h"
#ifdef ENABLE_NVTX_PROFILE
namespace onnxruntime {
namespace profile {
// Singleton class of managing global NVTX profiling information.
class Context {
public:
static Context& GetInstance() {
static Context instance_;
return instance_;
}
// Return tag for the specified thread.
// If the thread's tag doesn't exist, this function returns an empty string.
std::string GetThreadTagOrDefault(const std::thread::id& thread_id) {
const std::lock_guard<OrtMutex> lock(mtx_);
return thread_tag_[thread_id];
}
// Set tag for the specified thread.
void SetThreadTag(
const std::thread::id& thread_id, const std::string& tag) {
const std::lock_guard<OrtMutex> lock(mtx_);
thread_tag_[thread_id] = tag;
}
private:
Context() = default;
~Context() = default;
Context(const Context&) = delete;
Context& operator=(const Context&) = delete;
// map from thread's id to its human-readable tag.
std::unordered_map<std::thread::id, std::string> thread_tag_;
OrtMutex mtx_;
};
} // namespace profile
} // namespace onnxruntime
#endif
### |
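A short sketch of how these profiling helpers are typically combined when ENABLE_NVTX_PROFILE is defined; the range name, color, and thread tag are illustrative, while the calls themselves are the ones declared above:
#ifdef ENABLE_NVTX_PROFILE
#include <thread>
inline void ProfiledStepSketch() {
  // Tag the current thread so ranges recorded from it are easy to group in the timeline.
  onnxruntime::profile::Context::GetInstance().SetThreadTag(std::this_thread::get_id(), "worker-0");
  // Each range must see exactly one Begin() and exactly one End().
  onnxruntime::profile::NvtxRangeCreator range("ExampleStep", onnxruntime::profile::Color::Green);
  range.Begin();
  // ... work to be profiled ...
  range.End();
}
#endif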
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
#include "core/providers/cpu/object_detection/non_max_suppression.h"
namespace onnxruntime {
namespace rocm {
struct NonMaxSuppression final : public RocmKernel, public NonMaxSuppressionBase {
explicit NonMaxSuppression(const OpKernelInfo& info) : RocmKernel(info), NonMaxSuppressionBase(info) {
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(NonMaxSuppression);
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
#include "core/providers/cpu/object_detection/non_max_suppression.h"
namespace onnxruntime {
namespace cuda {
struct NonMaxSuppression final : public CudaKernel, public NonMaxSuppressionBase {
explicit NonMaxSuppression(const OpKernelInfo& info) : CudaKernel(info), NonMaxSuppressionBase(info) {
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(NonMaxSuppression);
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
#include <functional>
#include "core/providers/cpu/object_detection/non_max_suppression_helper.h"
namespace onnxruntime {
namespace rocm {
Status NonMaxSuppressionImpl(
hipStream_t stream,
std::function<IAllocatorUniquePtr<void>(size_t)> allocator,
const PrepareContext& pc,
const int64_t center_point_box,
int64_t batch_index,
int64_t class_index,
int max_output_boxes_per_class,
float iou_threshold,
float score_threshold,
IAllocatorUniquePtr<void>& selected_indices,
int* h_number_selected);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
#include <functional>
#include "core/providers/cpu/object_detection/non_max_suppression_helper.h"
namespace onnxruntime {
namespace cuda {
Status NonMaxSuppressionImpl(
cudaStream_t stream,
std::function<IAllocatorUniquePtr<void>(size_t)> allocator,
const PrepareContext& pc,
const int64_t center_point_box,
int64_t batch_index,
int64_t class_index,
int max_output_boxes_per_class,
float iou_threshold,
float score_threshold,
IAllocatorUniquePtr<void>& selected_indices,
int* h_number_selected);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "roialign.h"
#include "roialign_impl.h"
namespace onnxruntime {
namespace rocm {
#define REGISTER_KERNEL_TYPED(T) \
ONNX_OPERATOR_TYPED_KERNEL_EX( \
RoiAlign, \
kOnnxDomain, \
10, \
T, \
kRocmExecutionProvider, \
(*KernelDefBuilder::Create()) \
.TypeConstraint("T1", DataTypeImpl::GetTensorType<T>()) \
.TypeConstraint("T2", DataTypeImpl::GetTensorType<int64_t>()), \
RoiAlign<T>);
template <typename T>
Status RoiAlign<T>::ComputeInternal(OpKernelContext* context) const {
// X
const auto* X_ptr = context->Input<Tensor>(0);
// rois
const auto* rois_ptr = context->Input<Tensor>(1);
// batch indices
const auto* batch_indices_ptr = context->Input<Tensor>(2);
const auto& x_dims = X_ptr->Shape();
const auto& rois_dims = rois_ptr->Shape();
const auto& batch_indices_dims = batch_indices_ptr->Shape();
auto num_rois = batch_indices_dims[0];
auto num_roi_cols = rois_dims[1];
auto status = CheckROIAlignValidInput(X_ptr, rois_ptr, batch_indices_ptr);
if (status != Status::OK()) {
return status;
}
Tensor& Y = *context->Output(0, {num_rois, x_dims[1], this->output_height_, this->output_width_});
int64_t output_size = Y.Shape().Size();
if (output_size > 0) {
RoiAlignImpl(
Stream(context),
output_size, // num threads
reinterpret_cast<const typename ToHipType<T>::MappedType*>(X_ptr->Data<T>()),
ToHipType<T>::FromFloat(this->spatial_scale_),
x_dims[1], // num channels
x_dims[2], // height
x_dims[3], // width
this->output_height_,
this->output_width_,
this->sampling_ratio_,
reinterpret_cast<const typename ToHipType<T>::MappedType*>(rois_ptr->Data<T>()),
num_roi_cols,
reinterpret_cast<typename ToHipType<T>::MappedType*>(Y.MutableData<T>()),
this->mode_ == RoiAlignMode::avg,
this->half_pixel_,
batch_indices_ptr->Data<int64_t>());
}
return Status::OK();
}
#define SPECIALIZED_COMPUTE(T) \
REGISTER_KERNEL_TYPED(T) \
template Status RoiAlign<T>::ComputeInternal(OpKernelContext* ctx) const;
SPECIALIZED_COMPUTE(float)
SPECIALIZED_COMPUTE(double)
// SPECIALIZED_COMPUTE(MLFloat16)
} // namespace rocm
}  // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "roialign.h"
#include "roialign_impl.h"
namespace onnxruntime {
namespace cuda {
#define REGISTER_KERNEL_TYPED(T) \
ONNX_OPERATOR_TYPED_KERNEL_EX( \
RoiAlign, \
kOnnxDomain, \
10, \
T, \
kCudaExecutionProvider, \
(*KernelDefBuilder::Create()) \
.TypeConstraint("T1", DataTypeImpl::GetTensorType<T>()) \
.TypeConstraint("T2", DataTypeImpl::GetTensorType<int64_t>()), \
RoiAlign<T>);
template <typename T>
Status RoiAlign<T>::ComputeInternal(OpKernelContext* context) const {
// X
const auto* X_ptr = context->Input<Tensor>(0);
// rois
const auto* rois_ptr = context->Input<Tensor>(1);
// batch indices
const auto* batch_indices_ptr = context->Input<Tensor>(2);
const auto& x_dims = X_ptr->Shape();
const auto& rois_dims = rois_ptr->Shape();
const auto& batch_indices_dims = batch_indices_ptr->Shape();
auto num_rois = batch_indices_dims[0];
auto num_roi_cols = rois_dims[1];
auto status = CheckROIAlignValidInput(X_ptr, rois_ptr, batch_indices_ptr);
if (status != Status::OK()) {
return status;
}
Tensor& Y = *context->Output(0, {num_rois, x_dims[1], this->output_height_, this->output_width_});
int64_t output_size = Y.Shape().Size();
if (output_size > 0) {
RoiAlignImpl(
Stream(context),
output_size, // num threads
reinterpret_cast<const typename ToCudaType<T>::MappedType*>(X_ptr->Data<T>()),
ToCudaType<T>::FromFloat(this->spatial_scale_),
x_dims[1], // num channels
x_dims[2], // height
x_dims[3], // width
this->output_height_,
this->output_width_,
this->sampling_ratio_,
reinterpret_cast<const typename ToCudaType<T>::MappedType*>(rois_ptr->Data<T>()),
num_roi_cols,
reinterpret_cast<typename ToCudaType<T>::MappedType*>(Y.MutableData<T>()),
this->mode_ == RoiAlignMode::avg,
this->half_pixel_,
batch_indices_ptr->Data<int64_t>());
}
return Status::OK();
}
#define SPECIALIZED_COMPUTE(T) \
REGISTER_KERNEL_TYPED(T) \
template Status RoiAlign<T>::ComputeInternal(OpKernelContext* ctx) const;
SPECIALIZED_COMPUTE(float)
SPECIALIZED_COMPUTE(double)
// SPECIALIZED_COMPUTE(MLFloat16)
} // namespace cuda
}  // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
#include "core/providers/cpu/object_detection/roialign.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
struct RoiAlign final : RocmKernel, RoiAlignBase {
RoiAlign(const OpKernelInfo& info) : RocmKernel(info), RoiAlignBase(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
private:
ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(RoiAlign);
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
#include "core/providers/cpu/object_detection/roialign.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
struct RoiAlign final : CudaKernel, RoiAlignBase {
RoiAlign(const OpKernelInfo& info) : CudaKernel(info), RoiAlignBase(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
private:
ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(RoiAlign);
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
void RoiAlignImpl(
hipStream_t stream,
const int64_t nthreads,
const T* bottom_data,
const T spatial_scale,
const int64_t channels,
const int64_t height,
const int64_t width,
const int64_t pooled_height,
const int64_t pooled_width,
const int64_t sampling_ratio,
const T* bottom_rois,
int64_t roi_cols,
T* top_data,
const bool is_mode_avg,
const bool half_pixel,
const int64_t* batch_indices_ptr);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
void RoiAlignImpl(
cudaStream_t stream,
const int64_t nthreads,
const T* bottom_data,
const T spatial_scale,
const int64_t channels,
const int64_t height,
const int64_t width,
const int64_t pooled_height,
const int64_t pooled_width,
const int64_t sampling_ratio,
const T* bottom_rois,
int64_t roi_cols,
T* top_data,
const bool is_mode_avg,
const bool half_pixel,
const int64_t* batch_indices_ptr);
} // namespace cuda
} // namespace onnxruntime
### |
#pragma once
#include "core/providers/rocm/rocm_common.h"
#include "core/providers/rocm/shared_inc/accumulation_type.h"
namespace onnxruntime {
namespace rocm {
namespace detail {
size_t compute_reduce_matrix_columns_intermediate_buffer_size(
int element_size, int num_rows, int num_cols);
}
template <typename TIn>
size_t compute_reduce_matrix_columns_buffer_size(int m, int n) {
using TBuf = AccumulationType_t<TIn>;
return detail::compute_reduce_matrix_columns_intermediate_buffer_size(
sizeof(TBuf), m, n);
}
template <typename TIn>
size_t compute_reduction_buffer_size(int size) {
using TBuf = AccumulationType_t<TIn>;
return detail::compute_reduce_matrix_columns_intermediate_buffer_size(
sizeof(TBuf), 1, size);
}
template <typename TIn, typename TOut>
Status reduce_sum(hipStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size);
template <typename TIn, typename TOut>
Status reduce_square_sum(hipStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size);
template <typename TIn, typename TOut>
Status reduce_l2_norm(hipStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size);
template <typename TIn, typename TOut>
Status reduce_mean(hipStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size);
enum class ApplicableMatrixReduction {
Rows, Columns, None, };
ApplicableMatrixReduction get_applicable_matrix_reduction(
const miopenReduceTensorOp_t miopen_reduce_op, gsl::span<const int64_t> dims, gsl::span<const int64_t> axes, int& m, int& n);
template <typename TIn, typename TOut>
Status reduce_matrix_rows(hipStream_t stream, const TIn* input, TOut* output, int m, int n, bool reset_initial_output = true);
template <typename TIn, typename TOut>
Status reduce_matrix_columns(hipStream_t stream, const TIn* input, TOut* output, int m, int n, void* buffer, size_t buffer_size);
template <typename T>
void UnaryDiv(hipStream_t stream, const T* input, T* output, T denominator, size_t count);
}
}
### |
#pragma once
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/shared_inc/accumulation_type.h"
namespace onnxruntime {
namespace cuda {
namespace detail {
size_t compute_reduce_matrix_columns_intermediate_buffer_size(
int element_size, int num_rows, int num_cols);
}
template <typename TIn>
size_t compute_reduce_matrix_columns_buffer_size(int m, int n) {
using TBuf = AccumulationType_t<TIn>;
return detail::compute_reduce_matrix_columns_intermediate_buffer_size(
sizeof(TBuf), m, n);
}
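// compute_reduction_buffer_size: a full reduction over `size` elements is sized as a
// 1 x size matrix-columns reduction, so it shares the intermediate buffer computation above.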
template <typename TIn>
size_t compute_reduction_buffer_size(int size) {
using TBuf = AccumulationType_t<TIn>;
return detail::compute_reduce_matrix_columns_intermediate_buffer_size(
sizeof(TBuf), 1, size);
}
template <typename TIn, typename TOut>
Status reduce_sum(cudaStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size);
template <typename TIn, typename TOut>
Status reduce_square_sum(cudaStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size);
template <typename TIn, typename TOut>
Status reduce_l2_norm(cudaStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size);
template <typename TIn, typename TOut>
Status reduce_mean(cudaStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size);
enum class ApplicableMatrixReduction {
Rows, Columns, None, };
ApplicableMatrixReduction get_applicable_matrix_reduction(
const cudnnReduceTensorOp_t cudnn_reduce_op, gsl::span<const int64_t> dims, gsl::span<const int64_t> axes, int& m, int& n);
template <typename TIn, typename TOut>
Status reduce_matrix_rows(cudaStream_t stream, const TIn* input, TOut* output, int m, int n, bool reset_initial_output = true);
template <typename TIn, typename TOut>
Status reduce_matrix_columns(cudaStream_t stream, const TIn* input, TOut* output, int m, int n, void* buffer, size_t buffer_size);
template <typename T>
void UnaryDiv(cudaStream_t stream, const T* input, T* output, T denominator, size_t count);
}
}
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/rocm/rocm_check_memory.h"
#include "core/providers/rocm/rocm_common.h"
namespace onnxruntime {
void CheckIfMemoryOnCurrentGpuDevice(const void* ptr) {
hipPointerAttribute_t attrs;
HIP_CALL_THROW(hipPointerGetAttributes(&attrs, ptr));
int current_device;
HIP_CALL_THROW(hipGetDevice(¤t_device));
ORT_ENFORCE(attrs.device == current_device,
"Current ROCM device is ", current_device,
" but the memory of pointer ", ptr,
" is allocated on device ", attrs.device);
}
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cuda_check_memory.h"
#include "core/providers/cuda/cuda_common.h"
namespace onnxruntime {
void CheckIfMemoryOnCurrentGpuDevice(const void* ptr) {
cudaPointerAttributes attrs;
CUDA_CALL_THROW(cudaPointerGetAttributes(&attrs, ptr));
int current_device;
CUDA_CALL_THROW(cudaGetDevice(¤t_device));
ORT_ENFORCE(attrs.device == current_device,
"Current CUDA device is ", current_device,
" but the memory of pointer ", ptr,
" is allocated on device ", attrs.device);
}
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/cu_inc/common.cuh"
namespace onnxruntime {
namespace rocm {
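// Rounds `value` up to the smallest power of two that is >= value by bit-filling below the
// highest set bit; e.g. least_pow2_bound(5) == 8 and least_pow2_bound(8) == 8.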
__forceinline__ __host__ __device__ int least_pow2_bound(int value) {
unsigned int value_ = static_cast<unsigned int>(value);
--value_;
value_ |= value_ >> 1;
value_ |= value_ >> 2;
value_ |= value_ >> 4;
value_ |= value_ >> 8;
value_ |= value_ >> 16;
return static_cast<int>(++value_);
}
struct Square {
template <typename T>
__forceinline__ __device__ T operator()(const T& value) {
return value * value;
}
};
struct Sqrt {
template <typename T>
__forceinline__ __device__ T operator()(const T& value) {
return _Sqrt(value);
}
};
struct Identity {
template <typename T>
__forceinline__ __device__ T operator()(const T& value) {
return value;
}
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cu_inc/common.cuh"
namespace onnxruntime {
namespace cuda {
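// Rounds `value` up to the smallest power of two that is >= value by bit-filling below the
// highest set bit; e.g. least_pow2_bound(5) == 8 and least_pow2_bound(8) == 8.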
__forceinline__ __host__ __device__ int least_pow2_bound(int value) {
unsigned int value_ = static_cast<unsigned int>(value);
--value_;
value_ |= value_ >> 1;
value_ |= value_ >> 2;
value_ |= value_ >> 4;
value_ |= value_ >> 8;
value_ |= value_ >> 16;
return static_cast<int>(++value_);
}
struct Square {
template <typename T>
__forceinline__ __device__ T operator()(const T& value) {
return value * value;
}
};
struct Sqrt {
template <typename T>
__forceinline__ __device__ T operator()(const T& value) {
return _Sqrt(value);
}
};
struct Identity {
template <typename T>
__forceinline__ __device__ T operator()(const T& value) {
return value;
}
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <hip/hip_fp16.h>
#include "core/framework/float16.h"
namespace onnxruntime {
namespace rocm {
// specifies the auxiliary type to use for accumulation of the given type
template <typename T>
struct AccumulationType;
template <>
struct AccumulationType<half> {
using type = float;
};
template <>
struct AccumulationType<float> {
using type = float;
};
template <>
struct AccumulationType<double> {
using type = double;
};
template <>
struct AccumulationType<BFloat16> {
using type = float;
};
template <typename T>
using AccumulationType_t = typename AccumulationType<T>::type;
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <cuda_fp16.h>
#include "core/framework/float16.h"
namespace onnxruntime {
namespace cuda {
// specifies the auxiliary type to use for accumulation of the given type
template <typename T>
struct AccumulationType;
template <>
struct AccumulationType<half> {
using type = float;
};
template <>
struct AccumulationType<float> {
using type = float;
};
template <>
struct AccumulationType<double> {
using type = double;
};
template <>
struct AccumulationType<BFloat16> {
using type = float;
};
template <typename T>
using AccumulationType_t = typename AccumulationType<T>::type;
} // namespace cuda
} // namespace onnxruntime
### |
//
// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
//
#pragma once
#include <iostream>
#include <limits>
#include <hip/hip_runtime.h>
#include <cmath>
#include "core/common/common.h"
namespace onnxruntime {
namespace rocm {
// The code below is based on section 4 Unsigned division of the paper https://gmplib.org/~tege/divcnst-pldi94.pdf
// In current ORT, fast_divmod is used for calculating the position of an element in a tensor,
// so the unsigned integer division from the paper is good enough for ORT. The advantage is that div is very simple,
// so the GPU compiler can easily unroll loops when divmod is called in a loop.
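// Illustrative example: for d = 3, l_ = 2 (the smallest l with 2^l >= 3) and
// M_ = (2^32 * (2^2 - 3)) / 3 + 1 = 1431655766, so div(7) computes
// __umulhi(1431655766, 7) = 2 and returns (2 + 7) >> 2 = 2, matching 7 / 3.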
struct fast_divmod {
fast_divmod(int d = 1) {
d_ = d == 0 ? 1 : d;
ORT_ENFORCE(d_ >= 1 && d_ <= static_cast<uint32_t>(std::numeric_limits<int>::max()));
for (l_ = 0; l_ < 32; l_++)
if ((1U << l_) >= d_) break;
uint64_t one = 1;
uint64_t m = ((one << 32) * ((one << l_) - d_)) / d_ + 1;
M_ = static_cast<uint32_t>(m);
// according to the paper, the value of m' should fit in an unsigned integer.
ORT_ENFORCE(M_ > 0 && M_ == m);
}
__host__ __device__ inline int div(int n) const {
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
uint32_t t = __umulhi(M_, n);
return (t + n) >> l_;
#else
// Using uint64_t for t, then t + n won't overflow.
uint64_t t = ((uint64_t)M_ * n) >> 32;
return static_cast<int>((t + n) >> l_);
#endif
}
__host__ __device__ inline int mod(int n) const {
return n - div(n) * d_;
}
__host__ __device__ inline void divmod(int n, int& q, int& r) const {
q = div(n);
r = n - q * d_;
}
uint32_t d_; // divisor
uint32_t M_; // m' in the paper.
uint32_t l_; // l_ = ceil(log2(d_))
};
} // namespace rocm
} // namespace onnxruntime
### |
//
// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
//
#pragma once
#include <iostream>
#include <limits>
#include <cuda_runtime.h>
#include <cmath>
#include "core/common/common.h"
namespace onnxruntime {
namespace cuda {
// The code below is based on section 4 Unsigned division of the paper https://gmplib.org/~tege/divcnst-pldi94.pdf
// In current ORT, fast_divmod is used for calculating the position of an element in a tensor,
// so the unsigned integer division from the paper is good enough for ORT. The advantage is that div is very simple,
// so the GPU compiler can easily unroll loops when divmod is called in a loop.
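// Illustrative example: for d = 3, l_ = 2 (the smallest l with 2^l >= 3) and
// M_ = (2^32 * (2^2 - 3)) / 3 + 1 = 1431655766, so div(7) computes
// __umulhi(1431655766, 7) = 2 and returns (2 + 7) >> 2 = 2, matching 7 / 3.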
struct fast_divmod {
fast_divmod(int d = 1) {
d_ = d == 0 ? 1 : d;
ORT_ENFORCE(d_ >= 1 && d_ <= static_cast<uint32_t>(std::numeric_limits<int>::max()));
for (l_ = 0; l_ < 32; l_++)
if ((1U << l_) >= d_) break;
uint64_t one = 1;
uint64_t m = ((one << 32) * ((one << l_) - d_)) / d_ + 1;
M_ = static_cast<uint32_t>(m);
// according to the paper, the value of m' should fit in an unsigned integer.
ORT_ENFORCE(M_ > 0 && M_ == m);
}
__host__ __device__ inline int div(int n) const {
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
uint32_t t = __umulhi(M_, n);
return (t + n) >> l_;
#else
// Using uint64_t for t, then t + n won't overflow.
uint64_t t = ((uint64_t)M_ * n) >> 32;
return static_cast<int>((t + n) >> l_);
#endif
}
__host__ __device__ inline int mod(int n) const {
return n - div(n) * d_;
}
__host__ __device__ inline void divmod(int n, int& q, int& r) const {
q = div(n);
r = n - q * d_;
}
uint32_t d_; // divisor
uint32_t M_; // m' in the paper.
uint32_t l_; // l_ = ceil(log2(d_))
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
Status GemmInt8(int m,
int n,
int k,
int32_t alpha_matmul,
int32_t beta_matmul,
const int8_t* a,
int lda,
const int8_t* b,
int ldb,
int32_t* c,
int ldc,
const RocmKernel* rocm_kernel,
onnxruntime::Stream* stream);
}
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
Status GemmInt8(int m,
int n,
int k,
int32_t alpha_matmul,
int32_t beta_matmul,
const int8_t* a,
int lda,
const int8_t* b,
int ldb,
int32_t* c,
int ldc,
const CudaKernel* cuda_kernel,
onnxruntime::Stream* stream);
}
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <typename SrcT>
class Cast final : public RocmKernel {
public:
Cast(const OpKernelInfo& info) : RocmKernel(info) {
int64_t to;
Status status = info.GetAttr("to", &to);
ORT_ENFORCE(status.IsOK(), "Attribute to is not set.");
to_ = gsl::narrow_cast<ONNX_NAMESPACE::TensorProto_DataType>(to);
int64_t saturate = info.GetAttrOrDefault("saturate", int64_t{1});
if (saturate == 0 &&
to != ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT8E4M3FN &&
to != ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT8E4M3FNUZ &&
to != ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT8E5M2 &&
to != ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT8E5M2FNUZ) {
ORT_THROW("Attribute saturate is only used for cast to float 8 types.");
}
saturate_ = saturate == 1;
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
ONNX_NAMESPACE::TensorProto_DataType to_;
bool saturate_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <typename SrcT>
class Cast final : public CudaKernel {
public:
Cast(const OpKernelInfo& info) : CudaKernel(info) {
int64_t to;
Status status = info.GetAttr("to", &to);
ORT_ENFORCE(status.IsOK(), "Attribute to is not set.");
to_ = gsl::narrow_cast<ONNX_NAMESPACE::TensorProto_DataType>(to);
int64_t saturate = info.GetAttrOrDefault("saturate", int64_t{1});
if (saturate == 0 &&
to != ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT8E4M3FN &&
to != ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT8E4M3FNUZ &&
to != ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT8E5M2 &&
to != ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT8E5M2FNUZ) {
ORT_THROW("Attribute saturate is only used for cast to float 8 types.");
}
saturate_ = saturate == 1;
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
ONNX_NAMESPACE::TensorProto_DataType to_;
bool saturate_;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/shared_library/provider_api.h"
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
class Compress final : public RocmKernel {
public:
Compress(const OpKernelInfo& info) : RocmKernel(info) {
has_axis_ = info.GetAttr("axis", &axis_).IsOK();
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
int64_t axis_;
bool has_axis_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/shared_library/provider_api.h"
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
class Compress final : public CudaKernel {
public:
Compress(const OpKernelInfo& info) : CudaKernel(info) {
has_axis_ = info.GetAttr("axis", &axis_).IsOK();
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
int64_t axis_;
bool has_axis_;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
#include "core/common/common.h"
namespace onnxruntime {
namespace rocm {
hipError_t CompressCalcPrefixSumTempStorageBytes(hipStream_t stream, const int8_t* condition_data,
int32_t* condition_cumulative_sum, int length, size_t& temp_storage_bytes);
hipError_t CompressInclusivePrefixSum(hipStream_t stream, void* d_temp_storage, size_t temp_storage_bytes,
const int8_t* condition_data, int32_t* condition_cumulative_sum, int length);
Status CompressImpl(hipStream_t stream,
const size_t element_bytes,
const int32_t valid_condition_length,
const int32_t axis_right_stride,
const int32_t input_axis_dim_length,
const int32_t output_axis_dim_length,
const int32_t* condition_cumulative_sum,
const bool* condition_data,
const void* input_data,
void* output_data,
const size_t N);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
#include "core/common/common.h"
namespace onnxruntime {
namespace cuda {
cudaError_t CompressCalcPrefixSumTempStorageBytes(cudaStream_t stream, const int8_t* condition_data,
int32_t* condition_cumulative_sum, int length, size_t& temp_storage_bytes);
cudaError_t CompressInclusivePrefixSum(cudaStream_t stream, void* d_temp_storage, size_t temp_storage_bytes,
const int8_t* condition_data, int32_t* condition_cumulative_sum, int length);
Status CompressImpl(cudaStream_t stream,
const size_t element_bytes,
const int32_t valid_condition_length,
const int32_t axis_right_stride,
const int32_t input_axis_dim_length,
const int32_t output_axis_dim_length,
const int32_t* condition_cumulative_sum,
const bool* condition_data,
const void* input_data,
void* output_data,
const size_t N);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/shared_library/provider_api.h"
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
#include "core/providers/cpu/tensor/concatbase.h"
namespace onnxruntime {
namespace rocm {
class Concat final : public RocmKernel, public ConcatBase {
public:
Concat(const OpKernelInfo& info) : RocmKernel(info), ConcatBase(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/shared_library/provider_api.h"
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
#include "core/providers/cpu/tensor/concatbase.h"
namespace onnxruntime {
namespace cuda {
class Concat final : public CudaKernel, public ConcatBase {
public:
Concat(const OpKernelInfo& info) : CudaKernel(info), ConcatBase(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
#include "core/common/common.h"
namespace onnxruntime {
namespace rocm {
template <typename InputDataArray>
Status ConcatSameConcatDimImpl(hipStream_t stream, const size_t element_bytes, const int block_size_including_axis_dim,
const int block_size_inside_axis_dim, const int64_t concat_size, void* output_data,
const InputDataArray input_data, const size_t output_size);
Status ConcatImpl(hipStream_t stream, const size_t element_bytes, const int block_size_including_axis_dim,
const int block_size_inside_axis_dim, const int64_t* concat_sizes, const int64_t* concat_sizes_range,
const int64_t* axis_dimension_input_output_mapping, void* output_data, const void** input_data,
const size_t output_size);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
#include "core/common/common.h"
namespace onnxruntime {
namespace cuda {
template <typename InputDataArray>
Status ConcatSameConcatDimImpl(cudaStream_t stream, const size_t element_bytes, const int block_size_including_axis_dim,
const int block_size_inside_axis_dim, const int64_t concat_size, void* output_data,
const InputDataArray input_data, const size_t output_size);
Status ConcatImpl(cudaStream_t stream, const size_t element_bytes, const int block_size_including_axis_dim,
const int block_size_inside_axis_dim, const int64_t* concat_sizes, const int64_t* concat_sizes_range,
const int64_t* axis_dimension_input_output_mapping, void* output_data, const void** input_data,
const size_t output_size);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
class Expand final : public RocmKernel {
public:
Expand(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
Status ComputeOutputShape(
const std::string& node_name,
const TensorShape& lhs_shape,
const TensorShape& rhs_shape,
TensorShape& out_shape);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
class Expand final : public CudaKernel {
public:
Expand(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
Status ComputeOutputShape(
const std::string& node_name,
const TensorShape& lhs_shape,
const TensorShape& rhs_shape,
TensorShape& out_shape);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
namespace onnxruntime {
// Throw if "ptr" is not allocated on the ROCM device obtained by hipGetDevice.
void CheckIfMemoryOnCurrentGpuDevice(const void* ptr);
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
namespace onnxruntime {
// Throw if "ptr" is not allocated on the CUDA device obtained by cudaGetDevice.
void CheckIfMemoryOnCurrentGpuDevice(const void* ptr);
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
#include "core/providers/rocm/rocm_common.h"
namespace onnxruntime {
namespace rocm {
Status ExpandImpl(
hipStream_t stream,
const size_t element_size,
const int N_output,
const int N_input,
const void* input_data,
void* output_data,
const TArray<fast_divmod>& output_strides,
const TArray<int64_t>& input_strides);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
#include "core/providers/cuda/cuda_common.h"
namespace onnxruntime {
namespace cuda {
Status ExpandImpl(
cudaStream_t stream,
const size_t element_size,
const int N_output,
const int N_input,
const void* input_data,
void* output_data,
const TArray<fast_divmod>& output_strides,
const TArray<int64_t>& input_strides);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
class EyeLike final : public RocmKernel {
public:
EyeLike(const OpKernelInfo& info) : RocmKernel(info) {
if (!info.GetAttr("k", &k_).IsOK()) {
k_ = 0;
}
has_dtype_ = info.GetAttr("dtype", &dtype_).IsOK();
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
bool has_dtype_;
int64_t dtype_;
int64_t k_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
class EyeLike final : public CudaKernel {
public:
EyeLike(const OpKernelInfo& info) : CudaKernel(info) {
if (!info.GetAttr("k", &k_).IsOK()) {
k_ = 0;
}
has_dtype_ = info.GetAttr("dtype", &dtype_).IsOK();
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
bool has_dtype_;
int64_t dtype_;
int64_t k_;
};
} // namespace cuda
} // namespace onnxruntime
### |
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <stdint.h>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
#include "core/providers/rocm/cu_inc/common.cuh"
#include "eye_like_impl.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
__global__ void _EyeLikeKernel(
size_t offset,
size_t stripe,
T* output_data,
HIP_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
// offset is the index of the first diagonal element; stripe is width + 1.
output_data[offset + id * stripe] = static_cast<T>(1);
}
template <typename T>
void EyeLikeImpl(
hipStream_t stream,
size_t offset,
size_t stripe,
T* output_data,
size_t diag_count) {
constexpr int block_size = 256;
int blocksPerGrid = (int)(ceil(static_cast<float>(diag_count) / block_size));
HIP_LONG N = static_cast<HIP_LONG>(diag_count);
_EyeLikeKernel<<<blocksPerGrid, block_size, 0, stream>>>(offset, stripe, output_data, N);
}
#define SPECIALIZED_IMPL(T) \
template void EyeLikeImpl<T>( \
hipStream_t stream, \
size_t offset, \
size_t stripe, \
T* output_data, \
size_t diag_count);
SPECIALIZED_IMPL(int32_t)
SPECIALIZED_IMPL(int64_t)
SPECIALIZED_IMPL(uint64_t)
SPECIALIZED_IMPL(float)
SPECIALIZED_IMPL(double)
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "eye_like_impl.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
__global__ void _EyeLikeKernel(
size_t offset,
size_t stripe,
T* output_data,
CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
// offset is the index of the first diagonal element; stripe is width + 1.
output_data[offset + id * stripe] = static_cast<T>(1);
}
template <typename T>
void EyeLikeImpl(
cudaStream_t stream,
size_t offset,
size_t stripe,
T* output_data,
size_t diag_count) {
constexpr int block_size = 256;
int blocksPerGrid = (int)(ceil(static_cast<float>(diag_count) / block_size));
CUDA_LONG N = static_cast<CUDA_LONG>(diag_count);
_EyeLikeKernel<<<blocksPerGrid, block_size, 0, stream>>>(offset, stripe, output_data, N);
}
#define SPECIALIZED_IMPL(T) \
template void EyeLikeImpl<T>( \
cudaStream_t stream, \
size_t offset, \
size_t stripe, \
T* output_data, \
size_t diag_count);
SPECIALIZED_IMPL(int32_t)
SPECIALIZED_IMPL(int64_t)
SPECIALIZED_IMPL(uint64_t)
SPECIALIZED_IMPL(float)
SPECIALIZED_IMPL(double)
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
#include "core/providers/rocm/shared_inc/fast_divmod.h"
#include "core/common/common.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
void EyeLikeImpl(
hipStream_t stream,
    size_t offset, // offset of first element on the diagonal
    size_t stripe, // stripe, here it's width + 1
    T* output_data, // output buffer
    size_t diag_count // total number of elements on the diagonal
);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
#include "core/providers/cuda/shared_inc/fast_divmod.h"
#include "core/common/common.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
void EyeLikeImpl(
cudaStream_t stream,
    size_t offset, // offset of first element on the diagonal
    size_t stripe, // stripe, here it's width + 1
    T* output_data, // output buffer
    size_t diag_count // total number of elements on the diagonal
);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "flatten.h"
namespace onnxruntime {
namespace rocm {
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Flatten,
kOnnxDomain,
1, 8,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.Alias(0, 0)
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()),
Flatten);
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Flatten,
kOnnxDomain,
9, 10,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.Alias(0, 0)
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()),
Flatten);
// explicitly support negative axis
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Flatten,
kOnnxDomain,
11, 12,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.Alias(0, 0)
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()),
Flatten);
ONNX_OPERATOR_KERNEL_EX(
Flatten,
kOnnxDomain,
13,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.Alias(0, 0)
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()),
Flatten);
Status Flatten::ComputeInternal(OpKernelContext* ctx) const {
const Tensor* X = ctx->Input<Tensor>(0);
const TensorShape& X_shape = X->Shape();
auto axis = axis_;
// The valid axis range is [-rank, rank] instead of [-rank, rank-1], so only adjust negative axis values here.
if (axis < 0) {
axis = HandleNegativeAxis(axis, X_shape.NumDimensions()); // handle negative and enforce axis is valid
}
ORT_ENFORCE(gsl::narrow_cast<int64_t>(X_shape.NumDimensions()) >= axis, "The rank of input tensor must be >= axis");
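  // SizeToDimension(axis) collapses dims [0, axis) into the first output dim and
  // SizeFromDimension(axis) collapses dims [axis, rank) into the second.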
Tensor* Y = ctx->Output(0, {X_shape.SizeToDimension(axis), X_shape.SizeFromDimension(axis)});
// If source and target pointers are not equal (non-inplace operation), we need to copy the data.
const void* source = X->DataRaw();
void* target = Y->MutableDataRaw();
if (target != source) {
HIP_RETURN_IF_ERROR(hipMemcpyAsync(target, source, X_shape.Size() * X->DataType()->Size(),
hipMemcpyDeviceToDevice, Stream(ctx)));
}
return Status::OK();
}
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "flatten.h"
namespace onnxruntime {
namespace cuda {
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Flatten,
kOnnxDomain,
1, 8,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.Alias(0, 0)
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()),
Flatten);
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Flatten,
kOnnxDomain,
9, 10,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.Alias(0, 0)
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()),
Flatten);
// explicitly support negative axis
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Flatten,
kOnnxDomain,
11, 12,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.Alias(0, 0)
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()),
Flatten);
ONNX_OPERATOR_KERNEL_EX(
Flatten,
kOnnxDomain,
13,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.Alias(0, 0)
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()),
Flatten);
Status Flatten::ComputeInternal(OpKernelContext* ctx) const {
const Tensor* X = ctx->Input<Tensor>(0);
const TensorShape& X_shape = X->Shape();
auto axis = axis_;
// The valid axis range is [-rank, rank] instead of [-rank, rank-1], so only adjust negative axis values here.
if (axis < 0) {
axis = HandleNegativeAxis(axis, X_shape.NumDimensions()); // handle negative and enforce axis is valid
}
ORT_ENFORCE(gsl::narrow_cast<int64_t>(X_shape.NumDimensions()) >= axis, "The rank of input tensor must be >= axis");
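  // SizeToDimension(axis) collapses dims [0, axis) into the first output dim and
  // SizeFromDimension(axis) collapses dims [axis, rank) into the second.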
Tensor* Y = ctx->Output(0, {X_shape.SizeToDimension(axis), X_shape.SizeFromDimension(axis)});
// If source and target pointers are not equal (non-inplace operation), we need to copy the data.
const void* source = X->DataRaw();
void* target = Y->MutableDataRaw();
if (target != source) {
CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(target, source, X_shape.Size() * X->DataType()->Size(),
cudaMemcpyDeviceToDevice, Stream(ctx)));
}
return Status::OK();
}
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
class Flatten final : public RocmKernel {
public:
Flatten(const OpKernelInfo& info) : RocmKernel(info) {
ORT_ENFORCE(info.GetAttr<int64_t>("axis", &axis_).IsOK());
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
int64_t axis_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
class Flatten final : public CudaKernel {
public:
Flatten(const OpKernelInfo& info) : CudaKernel(info) {
ORT_ENFORCE(info.GetAttr<int64_t>("axis", &axis_).IsOK());
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
int64_t axis_;
};
} // namespace cuda
} // namespace onnxruntime
### |
#include "core/providers/rocm/tensor/gather_impl.h"
#include "core/providers/rocm/tensor/gather.h"
#include "core/providers/cpu/tensor/utils.h"
namespace onnxruntime {
namespace rocm {
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Gather, kOnnxDomain, 1, 10, kRocmExecutionProvider, (*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.TypeConstraint("Tind", std::vector<MLDataType>{
DataTypeImpl::GetTensorType<int32_t>(), DataTypeImpl::GetTensorType<int64_t>()}), Gather);
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Gather, kOnnxDomain, 11, 12, kRocmExecutionProvider, (*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.TypeConstraint("Tind", std::vector<MLDataType>{
DataTypeImpl::GetTensorType<int32_t>(), DataTypeImpl::GetTensorType<int64_t>()}), Gather);
ONNX_OPERATOR_KERNEL_EX(
Gather, kOnnxDomain, 13, kRocmExecutionProvider, (*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.TypeConstraint("Tind", std::vector<MLDataType>{
DataTypeImpl::GetTensorType<int32_t>(), DataTypeImpl::GetTensorType<int64_t>()}), Gather);
Status Gather::ComputeInternal(OpKernelContext* context) const {
Prepare p;
ORT_RETURN_IF_ERROR(PrepareForCompute(context, p));
const TensorShape& input_shape = p.input_tensor->Shape();
const int64_t block_size = input_shape.SizeFromDimension(p.axis + 1);
size_t N = p.indices_tensor->Shape().Size();
const int64_t input_block_size = input_shape.SizeFromDimension(p.axis);
const int64_t output_block_size = N * block_size;
const int64_t indices_max = input_shape[p.axis];
const void* input_data = p.input_tensor->DataRaw();
const void* indices_data = p.indices_tensor->DataRaw();
void* output_data = p.output_tensor->MutableDataRaw();
if (p.output_tensor->Shape().Size() == 0) {
return Status::OK();
}
const fast_divmod divmod_output_block_size(gsl::narrow_cast<int>(output_block_size));
const fast_divmod divmod_block_size(gsl::narrow_cast<int>(block_size));
const size_t element_size = p.input_tensor->DataType()->Size();
const size_t index_element_size = p.indices_tensor->DataType()->Size();
if (p.indices_tensor->IsDataType<int32_t>() ||
p.indices_tensor->IsDataType<int64_t>()) {
GatherImpl(
Stream(context), input_block_size, indices_max, divmod_output_block_size, divmod_block_size, indices_data, index_element_size, input_data, element_size, output_data, p.output_tensor->Shape().Size());
return Status::OK();
}
return ORT_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED, "Type for Tind not supported yet in Gather.");
}
}
}
### |
#include "core/providers/cuda/tensor/gather_impl.h"
#include "core/providers/cuda/tensor/gather.h"
#include "core/providers/cpu/tensor/utils.h"
namespace onnxruntime {
namespace cuda {
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Gather, kOnnxDomain, 1, 10, kCudaExecutionProvider, (*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.TypeConstraint("Tind", std::vector<MLDataType>{
DataTypeImpl::GetTensorType<int32_t>(), DataTypeImpl::GetTensorType<int64_t>()}), Gather);
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Gather, kOnnxDomain, 11, 12, kCudaExecutionProvider, (*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.TypeConstraint("Tind", std::vector<MLDataType>{
DataTypeImpl::GetTensorType<int32_t>(), DataTypeImpl::GetTensorType<int64_t>()}), Gather);
ONNX_OPERATOR_KERNEL_EX(
Gather, kOnnxDomain, 13, kCudaExecutionProvider, (*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.TypeConstraint("Tind", std::vector<MLDataType>{
DataTypeImpl::GetTensorType<int32_t>(), DataTypeImpl::GetTensorType<int64_t>()}), Gather);
Status Gather::ComputeInternal(OpKernelContext* context) const {
Prepare p;
ORT_RETURN_IF_ERROR(PrepareForCompute(context, p));
const TensorShape& input_shape = p.input_tensor->Shape();
const int64_t block_size = input_shape.SizeFromDimension(p.axis + 1);
size_t N = p.indices_tensor->Shape().Size();
const int64_t input_block_size = input_shape.SizeFromDimension(p.axis);
const int64_t output_block_size = N * block_size;
const int64_t indices_max = input_shape[p.axis];
const void* input_data = p.input_tensor->DataRaw();
const void* indices_data = p.indices_tensor->DataRaw();
void* output_data = p.output_tensor->MutableDataRaw();
if (p.output_tensor->Shape().Size() == 0) {
return Status::OK();
}
const fast_divmod divmod_output_block_size(gsl::narrow_cast<int>(output_block_size));
const fast_divmod divmod_block_size(gsl::narrow_cast<int>(block_size));
const size_t element_size = p.input_tensor->DataType()->Size();
const size_t index_element_size = p.indices_tensor->DataType()->Size();
if (p.indices_tensor->IsDataType<int32_t>() ||
p.indices_tensor->IsDataType<int64_t>()) {
GatherImpl(
Stream(context), input_block_size, indices_max, divmod_output_block_size, divmod_block_size, indices_data, index_element_size, input_data, element_size, output_data, p.output_tensor->Shape().Size());
return Status::OK();
}
return ORT_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED, "Type for Tind not supported yet in Gather.");
}
}
}
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/rocm/rocm_kernel.h"
#include "core/providers/cpu/tensor/gatherbase.h"
namespace onnxruntime {
namespace rocm {
class Gather : public RocmKernel, public GatherBase {
public:
Gather(const OpKernelInfo& info) : RocmKernel(info), GatherBase(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/cuda/cuda_kernel.h"
#include "core/providers/cpu/tensor/gatherbase.h"
namespace onnxruntime {
namespace cuda {
class Gather : public CudaKernel, public GatherBase {
public:
Gather(const OpKernelInfo& info) : CudaKernel(info), GatherBase(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
#include "core/providers/shared_library/provider_api.h"
namespace onnxruntime {
namespace rocm {
struct GatherScatterElementsArgs;
// Coalesce those contiguous axes that have the same dim values for both input and indices (excluding the gather/scatter axis)
// so that we have fewer divmod computations in the kernels.
// For example:
// shape(input)=[2,2,2], shape(indices)=[2,2,3], axis=2 is the same as shape(input)=[4,2], shape(indices)=[4,3], axis=1
// shape(input)=[2,1,2,2,3,2,2], shape(indices)=[2,1,2,2,2,2,2], axis=3 is the same as
// shape(input)=[4,2,3,4], shape(indices)=[4,2,2,4], axis=1
// If indices is strided, dim i (outer) and dim j are contiguous when strides[i] = shape[j] * strides[j].
// For example:
// shape(indices)=[2,3,4,5], strides(indices)=[0,20,5,1]: dim-2 and dim-3 are contiguous (5==5*1),
// dim-1 and dim-2 are contiguous (20==4*5), but dim-0 and dim-1 are not contiguous (0!=3*20).
void CoalesceDimensions(TensorShapeVector& input_shape, TensorShapeVector& indices_shape,
TensorShapeVector* p_indices_strides, int64_t axis, GatherScatterElementsArgs& args);
ONNX_NAMESPACE::TensorProto_DataType GetElementType(size_t element_size);
class GatherElements final : public RocmKernel {
public:
GatherElements(const OpKernelInfo& info) : RocmKernel(info) {
ORT_ENFORCE(info.GetAttr<int64_t>("axis", &axis_).IsOK(), "Missing/Invalid 'axis' attribute value");
}
~GatherElements() = default;
Status ComputeInternal(OpKernelContext* context) const override;
private:
template <typename T>
struct ComputeImpl;
int64_t axis_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
#include "core/providers/shared_library/provider_api.h"
namespace onnxruntime {
namespace cuda {
struct GatherScatterElementsArgs;
// Coalesce those contiguous axes that have the same dim values for both input and indices (excluding the gather/scatter axis)
// so that we have fewer divmod computations in the kernels.
// For example:
// shape(input)=[2,2,2], shape(indices)=[2,2,3], axis=2 is the same as shape(input)=[4,2], shape(indices)=[4,3], axis=1
// shape(input)=[2,1,2,2,3,2,2], shape(indices)=[2,1,2,2,2,2,2], axis=3 is the same as
// shape(input)=[4,2,3,4], shape(indices)=[4,2,2,4], axis=1
// If indices is strided, dim i (outer) and dim j are contiguous when strides[i] = shape[j] * strides[j].
// For example:
// shape(indices)=[2,3,4,5], strides(indices)=[0,20,5,1]: dim-2 and dim-3 are contiguous (5==5*1),
// dim-1 and dim-2 are contiguous (20==4*5), but dim-0 and dim-1 are not contiguous (0!=3*20).
void CoalesceDimensions(TensorShapeVector& input_shape, TensorShapeVector& indices_shape,
TensorShapeVector* p_indices_strides, int64_t axis, GatherScatterElementsArgs& args);
ONNX_NAMESPACE::TensorProto_DataType GetElementType(size_t element_size);
class GatherElements final : public CudaKernel {
public:
GatherElements(const OpKernelInfo& info) : CudaKernel(info) {
ORT_ENFORCE(info.GetAttr<int64_t>("axis", &axis_).IsOK(), "Missing/Invalid 'axis' attribute value");
}
~GatherElements() = default;
Status ComputeInternal(OpKernelContext* context) const override;
private:
template <typename T>
struct ComputeImpl;
int64_t axis_;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
namespace onnxruntime {
namespace rocm {
struct GatherScatterElementsArgs {
int64_t rank;
int64_t axis;
int64_t input_size;
int64_t input_dim_along_axis;
int64_t input_stride_along_axis;
TArray<int64_t> masked_input_strides;
TArray<fast_divmod> indices_fdms;
TArray<int64_t> indices_strides;
int64_t indices_size;
};
template <typename T, typename TIndex>
void GatherElementsImpl(hipStream_t stream, const T* input_data, const TIndex* indices_data, T* output_data,
const GatherScatterElementsArgs& args);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
namespace onnxruntime {
namespace cuda {
struct GatherScatterElementsArgs {
int64_t rank;
int64_t axis;
int64_t input_size;
int64_t input_dim_along_axis;
int64_t input_stride_along_axis;
TArray<int64_t> masked_input_strides;
TArray<fast_divmod> indices_fdms;
TArray<int64_t> indices_strides;
int64_t indices_size;
};
template <typename T, typename TIndex>
void GatherElementsImpl(cudaStream_t stream, const T* input_data, const TIndex* indices_data, T* output_data,
const GatherScatterElementsArgs& args);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/rocm/rocm_graph.h"
#include "core/providers/rocm/rocm_common.h"
#include <hip/hip_runtime_api.h>
#include <hip/driver_types.h>
namespace onnxruntime {
ROCMGraph::ROCMGraph(hipStream_t stream) : stream_(stream) {
}
void ROCMGraph::SetStream(hipStream_t stream) {
stream_ = stream;
}
void ROCMGraph::CaptureBegin() {
ORT_ENFORCE(!has_graph_exec_,
"This rocm graph has already captured a graph. "
"Create a new instance to capture a new graph.");
HIP_CALL_THROW(hipStreamSynchronize(stream_));
// For now rocm graph can only work with a single thread. In the future, we
// will support multiple threads. For multiple threads with multiple graphs
// and streams, `hipStreamCaptureModeGlobal` needs to be changed to
// `hipStreamCaptureModeThreadLocal`
HIP_CALL_THROW(hipStreamBeginCapture(stream_, hipStreamCaptureModeGlobal));
}
void ROCMGraph::CaptureEnd() {
HIP_CALL_THROW(hipStreamEndCapture(stream_, &graph_));
if (graph_ == NULL) {
ORT_THROW("ROCMGraph::CaptureEnd: graph_ is NULL");
}
has_graph_ = true;
HIP_CALL_THROW(hipGraphInstantiate(&graph_exec_, graph_, NULL, NULL, 0));
has_graph_exec_ = true;
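  // graph_exec_ now holds everything needed for replay, so the captured graph itself
  // can be released immediately.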
HIP_CALL_THROW(hipGraphDestroy(graph_));
has_graph_ = false;
}
Status ROCMGraph::Replay() {
// Although this function is not thread safe, the lock is not needed here because
// ROCM EP maintains a separate rocm graph per thread
LOGS_DEFAULT(INFO) << "Replaying ROCM graph on stream " << stream_;
HIP_RETURN_IF_ERROR(hipGraphLaunch(graph_exec_, stream_));
HIP_RETURN_IF_ERROR(hipStreamSynchronize(stream_));
return Status::OK();
}
void ROCMGraph::Reset() {
if (has_graph_) {
HIP_CALL_THROW(hipGraphDestroy(graph_));
has_graph_ = false;
}
if (has_graph_exec_) {
HIP_CALL_THROW(hipGraphExecDestroy(graph_exec_));
has_graph_exec_ = false;
}
}
ROCMGraph::~ROCMGraph() {
Reset();
}
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cuda_graph.h"
#include "core/providers/cuda/cuda_common.h"
#include <cuda_runtime_api.h>
#include <driver_types.h>
namespace onnxruntime {
CUDAGraph::CUDAGraph(cudaStream_t stream) : stream_(stream) {
}
void CUDAGraph::SetStream(cudaStream_t stream) {
stream_ = stream;
}
void CUDAGraph::CaptureBegin() {
ORT_ENFORCE(!has_graph_exec_,
"This cuda graph has already captured a graph. "
"Create a new instance to capture a new graph.");
CUDA_CALL_THROW(cudaStreamSynchronize(stream_));
// For now cuda graph can only work with a single thread. In the future, we
// will support multiple threads. For multiple threads with multiple graphs
// and streams, `cudaStreamCaptureModeGlobal` needs to be changed to
// `cudaStreamCaptureModeThreadLocal`
CUDA_CALL_THROW(cudaStreamBeginCapture(stream_, cudaStreamCaptureModeGlobal));
}
void CUDAGraph::CaptureEnd() {
CUDA_CALL_THROW(cudaStreamEndCapture(stream_, &graph_));
if (graph_ == NULL) {
ORT_THROW("CUDAGraph::CaptureEnd: graph_ is NULL");
}
has_graph_ = true;
CUDA_CALL_THROW(cudaGraphInstantiate(&graph_exec_, graph_, NULL, NULL, 0));
has_graph_exec_ = true;
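  // graph_exec_ now holds everything needed for replay, so the captured graph itself
  // can be released immediately.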
CUDA_CALL_THROW(cudaGraphDestroy(graph_));
has_graph_ = false;
}
Status CUDAGraph::Replay() {
// Although this function is not thread safe, the lock is not needed here because
// CUDA EP maintains a separate cuda graph per thread
LOGS_DEFAULT(INFO) << "Replaying CUDA graph on stream " << stream_;
CUDA_RETURN_IF_ERROR(cudaGraphLaunch(graph_exec_, stream_));
CUDA_RETURN_IF_ERROR(cudaStreamSynchronize(stream_));
return Status::OK();
}
void CUDAGraph::Reset() {
if (has_graph_) {
CUDA_CALL_THROW(cudaGraphDestroy(graph_));
has_graph_ = false;
}
if (has_graph_exec_) {
CUDA_CALL_THROW(cudaGraphExecDestroy(graph_exec_));
has_graph_exec_ = false;
}
}
CUDAGraph::~CUDAGraph() {
Reset();
}
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
namespace onnxruntime {
namespace rocm {
void GatherImpl(
hipStream_t stream,
const int64_t input_block_size,
const int64_t indices_max,
const fast_divmod& output_block_size,
const fast_divmod& block_size,
const void* indices_data,
size_t index_element_size,
const void* input_data,
size_t element_size,
void* output_data,
const size_t N);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
namespace onnxruntime {
namespace cuda {
void GatherImpl(
cudaStream_t stream,
const int64_t input_block_size,
const int64_t indices_max,
const fast_divmod& output_block_size,
const fast_divmod& block_size,
const void* indices_data,
size_t index_element_size,
const void* input_data,
size_t element_size,
void* output_data,
const size_t N);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
Status CheckBatchDimensionsMatch(
size_t num_batch_dimensions,
const std::vector<std::reference_wrapper<TensorShape>>& tensor_shapes);
class GatherNDBase : public RocmKernel {
public:
GatherNDBase(const OpKernelInfo& info) : RocmKernel(info) {
info.GetAttrOrDefault("batch_dims", &batch_dims_, static_cast<int64_t>(0));
ORT_ENFORCE(batch_dims_ >= 0);
}
protected:
template <typename TIndex>
Status PrepareCompute(
onnxruntime::Stream* stream,
const int64_t batch_dims,
const TensorShape& input_shape,
const TensorShape& indices_shape,
const Tensor* indices_tensor,
int64_t& num_slices,
int64_t& slice_size,
IAllocatorUniquePtr<int64_t>& input_slice_offsets_buffer) const;
int64_t batch_dims_;
};
template <typename Tind>
class GatherND final : public GatherNDBase {
public:
GatherND(const OpKernelInfo& info) : GatherNDBase(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
Status CheckBatchDimensionsMatch(
size_t num_batch_dimensions,
const std::vector<std::reference_wrapper<TensorShape>>& tensor_shapes);
class GatherNDBase : public CudaKernel {
public:
GatherNDBase(const OpKernelInfo& info) : CudaKernel(info) {
info.GetAttrOrDefault("batch_dims", &batch_dims_, static_cast<int64_t>(0));
ORT_ENFORCE(batch_dims_ >= 0);
}
protected:
template <typename TIndex>
Status PrepareCompute(
onnxruntime::Stream* stream,
const int64_t batch_dims,
const TensorShape& input_shape,
const TensorShape& indices_shape,
const Tensor* indices_tensor,
int64_t& num_slices,
int64_t& slice_size,
IAllocatorUniquePtr<int64_t>& input_slice_offsets_buffer) const;
int64_t batch_dims_;
};
template <typename Tind>
class GatherND final : public GatherNDBase {
public:
GatherND(const OpKernelInfo& info) : GatherNDBase(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/shared_inc/rocm_utils.h"
namespace onnxruntime {
namespace rocm {
template <typename TIndex>
void ComputeSliceOffsetsImpl(
hipStream_t stream,
const int64_t batch_dims,
const TArray<int64_t> input_dims,
const size_t num_slices,
const size_t num_slices_per_batch,
const size_t input_batch_stride,
const size_t num_slice_dims,
const int64_t* const sizes_from_slice_dims_data, // num_slice_dims elements
const TIndex* const indices_data, // num_slices * num_slice_dims elements
int64_t* const input_slice_offsets_data); // num_slices elements
template <typename T>
void GatherNDImpl(
hipStream_t stream,
const size_t num_slices,
const void* input_data,
void* output_data,
const size_t slice_size,
const int64_t* input_slice_offsets_data);
#ifdef ENABLE_TRAINING_OPS
template <typename T>
void GatherNDGradImpl(
hipStream_t stream,
const size_t num_slices,
const void* update_data,
void* output_data,
const size_t slice_size,
const int64_t* input_slice_offsets_data);
#endif
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/shared_inc/cuda_utils.h"
namespace onnxruntime {
namespace cuda {
template <typename TIndex>
void ComputeSliceOffsetsImpl(
cudaStream_t stream,
const int64_t batch_dims,
const TArray<int64_t> input_dims,
const size_t num_slices,
const size_t num_slices_per_batch,
const size_t input_batch_stride,
const size_t num_slice_dims,
const int64_t* const sizes_from_slice_dims_data, // num_slice_dims elements
const TIndex* const indices_data, // num_slices * num_slice_dims elements
int64_t* const input_slice_offsets_data); // num_slices elements
template <typename T>
void GatherNDImpl(
cudaStream_t stream,
const size_t num_slices,
const void* input_data,
void* output_data,
const size_t slice_size,
const int64_t* input_slice_offsets_data);
#ifdef ENABLE_TRAINING_OPS
template <typename T>
void GatherNDGradImpl(
cudaStream_t stream,
const size_t num_slices,
const void* update_data,
void* output_data,
const size_t slice_size,
const int64_t* input_slice_offsets_data);
#endif
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "identity_op.h"
namespace onnxruntime {
namespace rocm {
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Dropout,
kOnnxDomain,
7, 9,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", {DataTypeImpl::GetTensorType<MLFloat16>(),
DataTypeImpl::GetTensorType<float>(),
DataTypeImpl::GetTensorType<double>()})
.Alias(0, 0),
IdentityOp<true>);
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Dropout,
kOnnxDomain,
10,
11,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", {DataTypeImpl::GetTensorType<MLFloat16>(),
DataTypeImpl::GetTensorType<float>(),
DataTypeImpl::GetTensorType<double>()})
.TypeConstraint("T1", DataTypeImpl::GetTensorType<bool>())
.Alias(0, 0),
IdentityOp<true>);
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Identity,
kOnnxDomain,
1, 12,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.Alias(0, 0),
IdentityOp<false>);
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Identity,
kOnnxDomain,
13, 13,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.Alias(0, 0),
IdentityOp<false>);
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Identity,
kOnnxDomain,
14, 18,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("V", DataTypeImpl::AllFixedSizeTensorAndSequenceTensorTypes())
.Alias(0, 0),
IdentityOp<false>);
ONNX_OPERATOR_KERNEL_EX(
Identity,
kOnnxDomain,
19,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("V", DataTypeImpl::AllFixedSizeTensorAndSequenceTensorTypesIRv9())
.Alias(0, 0),
IdentityOp<false>);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "identity_op.h"
namespace onnxruntime {
namespace cuda {
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Dropout,
kOnnxDomain,
7, 9,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", {DataTypeImpl::GetTensorType<MLFloat16>(),
DataTypeImpl::GetTensorType<float>(),
DataTypeImpl::GetTensorType<double>()})
.Alias(0, 0),
IdentityOp<true>);
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Dropout,
kOnnxDomain,
10,
11,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", {DataTypeImpl::GetTensorType<MLFloat16>(),
DataTypeImpl::GetTensorType<float>(),
DataTypeImpl::GetTensorType<double>()})
.TypeConstraint("T1", DataTypeImpl::GetTensorType<bool>())
.Alias(0, 0),
IdentityOp<true>);
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Identity,
kOnnxDomain,
1, 12,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.Alias(0, 0),
IdentityOp<false>);
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Identity,
kOnnxDomain,
13, 13,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.Alias(0, 0),
IdentityOp<false>);
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Identity,
kOnnxDomain,
14, 18,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("V", DataTypeImpl::AllFixedSizeTensorAndSequenceTensorTypes())
.Alias(0, 0),
IdentityOp<false>);
ONNX_OPERATOR_KERNEL_EX(
Identity,
kOnnxDomain,
19,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("V", DataTypeImpl::AllFixedSizeTensorAndSequenceTensorTypesIRv9())
.Alias(0, 0),
IdentityOp<false>);
} // namespace cuda
} // namespace onnxruntime
### |
#pragma once
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
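// IdentityOp performs a device-to-device copy of the input to the output. When is_dropout is true
// it also serves as an inference-mode Dropout: the optional mask output, if requested, is zero-filled.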
template <bool is_dropout>
class IdentityOp final : public RocmKernel {
public:
IdentityOp(const OpKernelInfo& info) : RocmKernel(info) {
}
Status ComputeInternal(OpKernelContext* context) const override {
auto X_ml_type = context->InputType(0);
if (X_ml_type->IsTensorType()) {
const Tensor* X = context->Input<Tensor>(0);
if (nullptr == X) {
return Status(common::ONNXRUNTIME, common::FAIL, "IdentityOp rocm: input count mismatch.");
}
const TensorShape& shape = X->Shape();
Tensor* Y = context->Output(0, shape);
if (nullptr == Y) {
return Status(common::ONNXRUNTIME, common::FAIL, "IdentityOp rocm: failed to allocate output tensor.");
}
auto X_type = X->DataType();
const void* source = X->DataRaw(X_type);
void* target = Y->MutableDataRaw(X_type);
if (target != source) {
HIP_RETURN_IF_ERROR(hipMemcpyAsync(target, source, X->Shape().Size() * X->DataType()->Size(), hipMemcpyDeviceToDevice, Stream(context)));
}
if (is_dropout) {
Tensor* mask = context->Output(1, shape);
if (mask != nullptr) {
void* mask_data = mask->MutableDataRaw();
HIP_RETURN_IF_ERROR(hipMemsetAsync(mask_data, 0, mask->SizeInBytes(), Stream(context)));
}
}
} else if (X_ml_type->IsTensorSequenceType()) {
const TensorSeq* X = context->Input<TensorSeq>(0);
ORT_ENFORCE(X != nullptr, "IdentityOp rocm: input tensor is missing.");
TensorSeq* Y = context->Output<TensorSeq>(0);
ORT_ENFORCE(Y != nullptr, "IdentityOp rocm: failed to allocate output tensor sequence.");
if (X == Y) {
return Status::OK();
}
auto X_type = X->DataType();
Y->SetType(X_type);
AllocatorPtr alloc;
auto status = context->GetTempSpaceAllocator(&alloc);
if (!status.IsOK()) {
return Status(common::ONNXRUNTIME, common::FAIL, "IdentityOp rocm: unable to get an allocator.");
}
auto X_size = X->Size();
Y->Reserve(X_size);
for (size_t i = 0; i < X_size; ++i) {
const Tensor& source_tensor = X->Get(i);
std::unique_ptr<Tensor> target_tensor = Tensor::Create(source_tensor.DataType(), source_tensor.Shape(), alloc);
HIP_RETURN_IF_ERROR(hipMemcpyAsync(target_tensor->MutableDataRaw(), source_tensor.DataRaw(), source_tensor.SizeInBytes(), hipMemcpyDeviceToDevice, Stream(context)));
Y->Add(std::move(*target_tensor));
}
} else {
return Status(common::ONNXRUNTIME, common::FAIL, "IdentityOp rocm: unsupported input type.");
}
return Status::OK();
}
};
}  // namespace rocm
}  // namespace onnxruntime
### |
#pragma once
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
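// IdentityOp performs a device-to-device copy of the input to the output. When is_dropout is true
// it also serves as an inference-mode Dropout: the optional mask output, if requested, is zero-filled.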
template <bool is_dropout>
class IdentityOp final : public CudaKernel {
public:
IdentityOp(const OpKernelInfo& info) : CudaKernel(info) {
}
Status ComputeInternal(OpKernelContext* context) const override {
auto X_ml_type = context->InputType(0);
if (X_ml_type->IsTensorType()) {
const Tensor* X = context->Input<Tensor>(0);
if (nullptr == X) {
return Status(common::ONNXRUNTIME, common::FAIL, "IdentityOp cuda: input count mismatch.");
}
const TensorShape& shape = X->Shape();
Tensor* Y = context->Output(0, shape);
if (nullptr == Y) {
return Status(common::ONNXRUNTIME, common::FAIL, "IdentityOp cuda: failed to allocate output tensor.");
}
auto X_type = X->DataType();
const void* source = X->DataRaw(X_type);
void* target = Y->MutableDataRaw(X_type);
if (target != source) {
CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(target, source, X->Shape().Size() * X->DataType()->Size(), cudaMemcpyDeviceToDevice, Stream(context)));
}
if (is_dropout) {
Tensor* mask = context->Output(1, shape);
if (mask != nullptr) {
void* mask_data = mask->MutableDataRaw();
CUDA_RETURN_IF_ERROR(cudaMemsetAsync(mask_data, 0, mask->SizeInBytes(), Stream(context)));
}
}
} else if (X_ml_type->IsTensorSequenceType()) {
const TensorSeq* X = context->Input<TensorSeq>(0);
ORT_ENFORCE(X != nullptr, "IdentityOp cuda: input tensor is missing.");
TensorSeq* Y = context->Output<TensorSeq>(0);
ORT_ENFORCE(Y != nullptr, "IdentityOp cuda: failed to allocate output tensor sequence.");
if (X == Y) {
return Status::OK();
}
auto X_type = X->DataType();
Y->SetType(X_type);
AllocatorPtr alloc;
auto status = context->GetTempSpaceAllocator(&alloc);
if (!status.IsOK()) {
return Status(common::ONNXRUNTIME, common::FAIL, "IdentityOp cuda: unable to get an allocator.");
}
auto X_size = X->Size();
Y->Reserve(X_size);
for (size_t i = 0; i < X_size; ++i) {
const Tensor& source_tensor = X->Get(i);
std::unique_ptr<Tensor> target_tensor = Tensor::Create(source_tensor.DataType(), source_tensor.Shape(), alloc);
CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(target_tensor->MutableDataRaw(), source_tensor.DataRaw(), source_tensor.SizeInBytes(), cudaMemcpyDeviceToDevice, Stream(context)));
Y->Add(std::move(*target_tensor));
}
} else {
return Status(common::ONNXRUNTIME, common::FAIL, "IdentityOp cuda: unsupported input type.");
}
return Status::OK();
}
};
}  // namespace cuda
}  // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
namespace onnxruntime {
namespace rocm {
int NonZeroCalcBlockCount(int64_t x_size);
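// Typical two-pass pattern, as suggested by the signatures below: first call
// NonZeroCalcPrefixSumTempStorageBytes to query the required temp-storage size, allocate that
// buffer, then run NonZeroInclusivePrefixSum over the per-block counts.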
hipError_t NonZeroCalcPrefixSumTempStorageBytes(hipStream_t stream, int* prefix_counts, int number_of_blocks, size_t&);
hipError_t NonZeroInclusivePrefixSum(hipStream_t stream, void* d_temp_storage, size_t temp_storage_bytes, int* prefix_counts, int number_of_blocks);
// Counts the nonzero elements of each block into counts_in_blocks;
// the counts_in_blocks buffer must be pre-allocated on the GPU.
template <typename InputT>
hipError_t NonZeroCountEachBlock(hipStream_t stream, const InputT* x, int64_t x_size, int* counts_in_blocks);
// Writes the nonzero output positions using the input x and the per-block prefix_counts.
template <typename InputT>
hipError_t NonZeroOutputPositions(
hipStream_t stream, const InputT* x, int64_t x_size, int x_rank, const TArray<fast_divmod>& x_strides,
const int* prefix_counts, int nonzero_elements, int64_t* results);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
namespace onnxruntime {
namespace cuda {
int NonZeroCalcBlockCount(int64_t x_size);
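// Typical two-pass pattern, as suggested by the signatures below: first call
// NonZeroCalcPrefixSumTempStorageBytes to query the required temp-storage size, allocate that
// buffer, then run NonZeroInclusivePrefixSum over the per-block counts.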
cudaError_t NonZeroCalcPrefixSumTempStorageBytes(cudaStream_t stream, int* prefix_counts, int number_of_blocks, size_t&);
cudaError_t NonZeroInclusivePrefixSum(cudaStream_t stream, void* d_temp_storage, size_t temp_storage_bytes, int* prefix_counts, int number_of_blocks);
// Counts the nonzero elements of each block into counts_in_blocks;
// the counts_in_blocks buffer must be pre-allocated on the GPU.
template <typename InputT>
cudaError_t NonZeroCountEachBlock(cudaStream_t stream, const InputT* x, int64_t x_size, int* counts_in_blocks);
// Writes the nonzero output positions using the input x and the per-block prefix_counts.
template <typename InputT>
cudaError_t NonZeroOutputPositions(
cudaStream_t stream, const InputT* x, int64_t x_size, int x_rank, const TArray<fast_divmod>& x_strides,
const int* prefix_counts, int nonzero_elements, int64_t* results);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
class NonZero final : public RocmKernel {
public:
NonZero(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
class NonZero final : public CudaKernel {
public:
NonZero(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <typename in_type, typename out_type>
void OneHotImpl(
hipStream_t stream,
const in_type* indices,
const fast_divmod fdm_depth_suffix,
const fast_divmod fdm_suffix,
const int64_t depth_val,
const out_type on_value,
const out_type off_value,
out_type* output,
size_t count);
template <typename in_type, typename out_type>
void OneHotWithZeroOffValueImpl(
hipStream_t stream,
const in_type* indices,
const fast_divmod fdm_suffix,
const int64_t depth_val,
const out_type on_value,
out_type* output,
size_t count);
template <typename in_type, typename out_type, typename depth_type>
class OneHotOp final : public RocmKernel {
public:
explicit OneHotOp(const OpKernelInfo& info) : RocmKernel(info) {
int64_t tmp_axis;
if (info.GetAttr<int64_t>("axis", &tmp_axis).IsOK()) {
axis_ = tmp_axis;
}
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(OneHotOp);
int64_t axis_ = -1;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <typename in_type, typename out_type>
void OneHotImpl(
cudaStream_t stream,
const in_type* indices,
const fast_divmod fdm_depth_suffix,
const fast_divmod fdm_suffix,
const int64_t depth_val,
const out_type on_value,
const out_type off_value,
out_type* output,
size_t count);
template <typename in_type, typename out_type>
void OneHotWithZeroOffValueImpl(
cudaStream_t stream,
const in_type* indices,
const fast_divmod fdm_suffix,
const int64_t depth_val,
const out_type on_value,
out_type* output,
size_t count);
template <typename in_type, typename out_type, typename depth_type>
class OneHotOp final : public CudaKernel {
public:
explicit OneHotOp(const OpKernelInfo& info) : CudaKernel(info) {
int64_t tmp_axis;
if (info.GetAttr<int64_t>("axis", &tmp_axis).IsOK()) {
axis_ = tmp_axis;
}
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(OneHotOp);
int64_t axis_ = -1;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/rocm/rocm_kernel.h"
#include "core/providers/cpu/tensor/padbase.h"
using onnxruntime::PadBase;
namespace onnxruntime {
namespace rocm {
template <typename T>
class Pad final : public PadBase, public RocmKernel {
public:
Pad(const OpKernelInfo& info) : PadBase(info), RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/cuda/cuda_kernel.h"
#include "core/providers/cpu/tensor/padbase.h"
using onnxruntime::PadBase;
namespace onnxruntime {
namespace cuda {
template <typename T>
class Pad final : public PadBase, public CudaKernel {
public:
Pad(const OpKernelInfo& info) : PadBase(info), CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
void PadNCHWInputWithPaddingAlongHAndWImpl(
hipStream_t stream,
const int64_t n, // Batch
const int64_t c, // Channel
const int64_t input_height,
const int64_t output_height,
const int64_t input_width,
const int64_t output_width,
const int64_t pad_height_start,
const int64_t pad_width_start,
const T pad_value,
const int pad_mode,
const T* input_data,
T* output_data,
const size_t N);
template <typename T>
void PadImpl(
hipStream_t stream,
const size_t shape_rank,
const TArray<int64_t>& input_dims,
const TArray<int64_t>& input_strides,
const TArray<int64_t>& lower_pads,
const T pad_value,
const int pad_mode,
const T* input_data,
const TArray<fast_divmod>& fdm_output_strides,
T* output_data,
const size_t N);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
void PadNCHWInputWithPaddingAlongHAndWImpl(
cudaStream_t stream,
const int64_t n, // Batch
const int64_t c, // Channel
const int64_t input_height,
const int64_t output_height,
const int64_t input_width,
const int64_t output_width,
const int64_t pad_height_start,
const int64_t pad_width_start,
const T pad_value,
const int pad_mode,
const T* input_data,
T* output_data,
const size_t N);
template <typename T>
void PadImpl(
cudaStream_t stream,
const size_t shape_rank,
const TArray<int64_t>& input_dims,
const TArray<int64_t>& input_strides,
const TArray<int64_t>& lower_pads,
const T pad_value,
const int pad_mode,
const T* input_data,
const TArray<fast_divmod>& fdm_output_strides,
T* output_data,
const size_t N);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/platform/ort_mutex.h"
#include "core/providers/rocm/rocm_pch.h"
namespace onnxruntime {
using CaptureId_t = unsigned long long;
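// Captures work submitted to a HIP stream so it can later be replayed as a graph. Expected call
// order: SetStream, CaptureBegin, launch kernels on the stream, CaptureEnd, then Replay; Reset
// discards the captured graph.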
struct ROCMGraph {
ROCMGraph(){};
ROCMGraph(hipStream_t stream);
~ROCMGraph();
void SetStream(hipStream_t stream);
void CaptureBegin();
void CaptureEnd();
Status Replay();
void Reset();
private:
hipGraph_t graph_ = NULL;
hipGraphExec_t graph_exec_ = NULL;
bool has_graph_ = false;
bool has_graph_exec_ = false;
hipStream_t stream_ = nullptr; // Does not own the stream
};
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/platform/ort_mutex.h"
#include "core/providers/cuda/cuda_pch.h"
namespace onnxruntime {
using CaptureId_t = unsigned long long;
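// Captures work submitted to a CUDA stream so it can later be replayed as a graph. Expected call
// order: SetStream, CaptureBegin, launch kernels on the stream, CaptureEnd, then Replay; Reset
// discards the captured graph.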
struct CUDAGraph {
CUDAGraph(){};
CUDAGraph(cudaStream_t stream);
~CUDAGraph();
void SetStream(cudaStream_t stream);
void CaptureBegin();
void CaptureEnd();
Status Replay();
void Reset();
private:
cudaGraph_t graph_ = NULL;
cudaGraphExec_t graph_exec_ = NULL;
bool has_graph_ = false;
bool has_graph_exec_ = false;
cudaStream_t stream_ = nullptr; // Does not own the stream
};
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "quantize_linear.h"
#include "core/providers/rocm/rocm_common.h"
#include "core/providers/rocm/shared_inc/rocm_utils.h"
namespace onnxruntime {
namespace rocm {
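// The *Std variants implement the standard integer quantization path; the *Sat variants target the
// types that honor the 'saturate' attribute (added together with the float8 types in opset 19).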
template <class T, class U>
Status CudaQuantizeLinearStd(hipStream_t stream, const U* input, T* output, const U* scale, const T* zero_point, size_t num_of_element);
template <class T, class U>
Status CudaQuantizeLinearSat(hipStream_t stream, const U* input, T* output, const U* scale, const T* zero_point, size_t num_of_element,
bool saturate);
template <class T, class U>
Status CudaQuantizeLinearAxisStd(hipStream_t stream, const U* input, T* output, const U* scale, const T* zero_point, size_t num_of_element,
size_t batch_size, size_t n_scales);
template <class T, class U>
Status CudaQuantizeLinearAxisSat(hipStream_t stream, const U* input, T* output, const U* scale, const T* zero_point, size_t num_of_element,
size_t batch_size, size_t n_scales, bool saturate);
template <class T, class U>
Status CudaDequantizeLinearStd(hipStream_t stream, const T* input, U* output, const U* scale, const T* zero_point, size_t num_of_element);
template <class T, class U>
Status CudaDequantizeLinearSat(hipStream_t stream, const T* input, U* output, const U* scale, const T* zero_point, size_t num_of_element);
template <class T, class U>
Status CudaDequantizeLinearAxisStd(hipStream_t stream, const T* input, U* output, const U* scale, const T* zero_point, size_t num_of_element,
size_t batch_size, size_t n_scales);
template <class T, class U>
Status CudaDequantizeLinearAxisSat(hipStream_t stream, const T* input, U* output, const U* scale, const T* zero_point, size_t num_of_element,
size_t batch_size, size_t n_scales);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "quantize_linear.h"
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/shared_inc/cuda_utils.h"
namespace onnxruntime {
namespace cuda {
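// The *Std variants implement the standard integer quantization path; the *Sat variants target the
// types that honor the 'saturate' attribute (added together with the float8 types in opset 19).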
template <class T, class U>
Status CudaQuantizeLinearStd(cudaStream_t stream, const U* input, T* output, const U* scale, const T* zero_point, size_t num_of_element);
template <class T, class U>
Status CudaQuantizeLinearSat(cudaStream_t stream, const U* input, T* output, const U* scale, const T* zero_point, size_t num_of_element,
bool saturate);
template <class T, class U>
Status CudaQuantizeLinearAxisStd(cudaStream_t stream, const U* input, T* output, const U* scale, const T* zero_point, size_t num_of_element,
size_t batch_size, size_t n_scales);
template <class T, class U>
Status CudaQuantizeLinearAxisSat(cudaStream_t stream, const U* input, T* output, const U* scale, const T* zero_point, size_t num_of_element,
size_t batch_size, size_t n_scales, bool saturate);
template <class T, class U>
Status CudaDequantizeLinearStd(cudaStream_t stream, const T* input, U* output, const U* scale, const T* zero_point, size_t num_of_element);
template <class T, class U>
Status CudaDequantizeLinearSat(cudaStream_t stream, const T* input, U* output, const U* scale, const T* zero_point, size_t num_of_element);
template <class T, class U>
Status CudaDequantizeLinearAxisStd(cudaStream_t stream, const T* input, U* output, const U* scale, const T* zero_point, size_t num_of_element,
size_t batch_size, size_t n_scales);
template <class T, class U>
Status CudaDequantizeLinearAxisSat(cudaStream_t stream, const T* input, U* output, const U* scale, const T* zero_point, size_t num_of_element,
size_t batch_size, size_t n_scales);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <class T, class U>
class QuantizeLinear final : public RocmKernel {
public:
QuantizeLinear(const OpKernelInfo& info) : RocmKernel(info) {
if (!info.GetAttr<int64_t>("axis", &axis_).IsOK()) {
axis_ = 1;
}
if (!info.GetAttr<int64_t>("saturate", &saturate_).IsOK()) {
saturate_ = 1;
}
}
Status ComputeInternal(OpKernelContext* p_op_kernel_context) const override;
private:
int64_t axis_;
int64_t saturate_;
};
template <class T, class U>
class DequantizeLinear final : public RocmKernel {
public:
DequantizeLinear(const OpKernelInfo& info) : RocmKernel(info) {
if (!info.GetAttr<int64_t>("axis", &axis_).IsOK()) {
axis_ = 1;
}
}
Status ComputeInternal(OpKernelContext* p_op_kernel_context) const override;
private:
int64_t axis_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <class T, class U>
class QuantizeLinear final : public CudaKernel {
public:
QuantizeLinear(const OpKernelInfo& info) : CudaKernel(info) {
if (!info.GetAttr<int64_t>("axis", &axis_).IsOK()) {
axis_ = 1;
}
if (!info.GetAttr<int64_t>("saturate", &saturate_).IsOK()) {
saturate_ = 1;
}
}
Status ComputeInternal(OpKernelContext* p_op_kernel_context) const override;
private:
int64_t axis_;
int64_t saturate_;
};
template <class T, class U>
class DequantizeLinear final : public CudaKernel {
public:
DequantizeLinear(const OpKernelInfo& info) : CudaKernel(info) {
if (!info.GetAttr<int64_t>("axis", &axis_).IsOK()) {
axis_ = 1;
}
}
Status ComputeInternal(OpKernelContext* p_op_kernel_context) const override;
private:
int64_t axis_;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "reshape.h"
namespace onnxruntime {
namespace rocm {
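// For the opsets that take a shape tensor, input 1 is declared OrtMemTypeCPUInput so the requested
// shape arrives in CPU memory and can be read directly on the host.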
ONNX_OPERATOR_KERNEL_EX(
Reshape,
kOnnxDomain,
19,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypesIRv9())
.TypeConstraint("shape", DataTypeImpl::GetTensorType<int64_t>())
.Alias(0, 0)
.InputMemoryType(OrtMemTypeCPUInput, 1),
Reshape);
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Reshape,
kOnnxDomain,
14, 18,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.TypeConstraint("shape", DataTypeImpl::GetTensorType<int64_t>())
.Alias(0, 0)
.InputMemoryType(OrtMemTypeCPUInput, 1),
Reshape);
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Reshape,
kOnnxDomain,
13, 13,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.TypeConstraint("shape", DataTypeImpl::GetTensorType<int64_t>())
.Alias(0, 0)
.InputMemoryType(OrtMemTypeCPUInput, 1),
Reshape);
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Reshape,
kOnnxDomain,
5, 12,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.TypeConstraint("shape", DataTypeImpl::GetTensorType<int64_t>())
.Alias(0, 0)
.InputMemoryType(OrtMemTypeCPUInput, 1),
Reshape);
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Reshape,
kOnnxDomain,
1,
4,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.Alias(0, 0)
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()),
Reshape_1);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "reshape.h"
namespace onnxruntime {
namespace cuda {
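// For the opsets that take a shape tensor, input 1 is declared OrtMemTypeCPUInput so the requested
// shape arrives in CPU memory and can be read directly on the host.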
ONNX_OPERATOR_KERNEL_EX(
Reshape,
kOnnxDomain,
19,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypesIRv9())
.TypeConstraint("shape", DataTypeImpl::GetTensorType<int64_t>())
.Alias(0, 0)
.InputMemoryType(OrtMemTypeCPUInput, 1),
Reshape);
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Reshape,
kOnnxDomain,
14, 18,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.TypeConstraint("shape", DataTypeImpl::GetTensorType<int64_t>())
.Alias(0, 0)
.InputMemoryType(OrtMemTypeCPUInput, 1),
Reshape);
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Reshape,
kOnnxDomain,
13, 13,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.TypeConstraint("shape", DataTypeImpl::GetTensorType<int64_t>())
.Alias(0, 0)
.InputMemoryType(OrtMemTypeCPUInput, 1),
Reshape);
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Reshape,
kOnnxDomain,
5, 12,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.TypeConstraint("shape", DataTypeImpl::GetTensorType<int64_t>())
.Alias(0, 0)
.InputMemoryType(OrtMemTypeCPUInput, 1),
Reshape);
ONNX_OPERATOR_VERSIONED_KERNEL_EX(
Reshape,
kOnnxDomain,
1,
4,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.Alias(0, 0)
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()),
Reshape_1);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/rocm/rocm_kernel.h"
#include "core/providers/cpu/tensor/reshape_helper.h"
namespace onnxruntime {
namespace rocm {
class Reshape final : public RocmKernel {
public:
Reshape(const OpKernelInfo& info) : RocmKernel(info),
allow_zero_(info.GetAttrOrDefault("allowzero", static_cast<int64_t>(0)) == 1) {
}
Status ComputeInternal(OpKernelContext* context) const override {
// Copy the second input tensor into the shape vector
const Tensor* shapeTensor = context->Input<Tensor>(1);
if (shapeTensor == nullptr) return Status(common::ONNXRUNTIME, common::FAIL, "input count mismatch");
if (shapeTensor->Shape().NumDimensions() != 1) return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "A shape tensor must be a vector tensor, got ", shapeTensor->Shape().NumDimensions(), " dimensions");
auto data_span = shapeTensor->template DataAsSpan<int64_t>();
TensorShapeVector shape(data_span.begin(), data_span.end());
const Tensor* X = context->Input<Tensor>(0);
if (X == nullptr) return Status(common::ONNXRUNTIME, common::FAIL, "input count mismatch");
const TensorShape& X_shape = X->Shape();
ReshapeHelper helper(X_shape, shape, allow_zero_);
Tensor* Y = context->Output(0, TensorShape(shape));
const void* source = X->DataRaw();
void* target = Y->MutableDataRaw();
// If source and target pointers are not equal (non-inplace operation), we need to copy the data.
if (target != source) {
ORT_ENFORCE(context->GetComputeStream());
ORT_RETURN_IF_ERROR(CopyTensor(*X, *Y, *context->GetComputeStream()));
}
return Status::OK();
}
private:
bool allow_zero_;
};
class Reshape_1 final : public RocmKernel {
public:
Reshape_1(const OpKernelInfo& info) : RocmKernel(info) {
Status status = info.GetAttrs("shape", shape_);
ORT_ENFORCE(status.IsOK(), "Attribute shape is not set.");
}
Status ComputeInternal(OpKernelContext* context) const override {
TensorShapeVector shape = shape_;
const Tensor* X = context->Input<Tensor>(0);
const TensorShape& X_shape = X->Shape();
ReshapeHelper helper(X_shape, shape);
Tensor* Y = context->Output(0, TensorShape(shape));
const void* source = X->DataRaw();
void* target = Y->MutableDataRaw();
// If source and target pointers are not equal (non-inplace operation), we need to copy the data.
if (target != source) {
ORT_ENFORCE(context->GetComputeStream());
ORT_RETURN_IF_ERROR(CopyTensor(*X, *Y, *context->GetComputeStream()));
}
return Status::OK();
}
private:
TensorShapeVector shape_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/cuda/cuda_kernel.h"
#include "core/providers/cpu/tensor/reshape_helper.h"
namespace onnxruntime {
namespace cuda {
class Reshape final : public CudaKernel {
public:
Reshape(const OpKernelInfo& info) : CudaKernel(info),
allow_zero_(info.GetAttrOrDefault("allowzero", static_cast<int64_t>(0)) == 1) {
}
Status ComputeInternal(OpKernelContext* context) const override {
// Copy the second input tensor into the shape vector
const Tensor* shapeTensor = context->Input<Tensor>(1);
if (shapeTensor == nullptr) return Status(common::ONNXRUNTIME, common::FAIL, "input count mismatch");
if (shapeTensor->Shape().NumDimensions() != 1) return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "A shape tensor must be a vector tensor, got ", shapeTensor->Shape().NumDimensions(), " dimensions");
auto data_span = shapeTensor->template DataAsSpan<int64_t>();
TensorShapeVector shape(data_span.begin(), data_span.end());
const Tensor* X = context->Input<Tensor>(0);
if (X == nullptr) return Status(common::ONNXRUNTIME, common::FAIL, "input count mismatch");
const TensorShape& X_shape = X->Shape();
ReshapeHelper helper(X_shape, shape, allow_zero_);
Tensor* Y = context->Output(0, TensorShape(shape));
const void* source = X->DataRaw();
void* target = Y->MutableDataRaw();
// If source and target pointers are not equal (non-inplace operation), we need to copy the data.
if (target != source) {
ORT_ENFORCE(context->GetComputeStream());
ORT_RETURN_IF_ERROR(CopyTensor(*X, *Y, *context->GetComputeStream()));
}
return Status::OK();
}
private:
bool allow_zero_;
};
class Reshape_1 final : public CudaKernel {
public:
Reshape_1(const OpKernelInfo& info) : CudaKernel(info) {
Status status = info.GetAttrs("shape", shape_);
ORT_ENFORCE(status.IsOK(), "Attribute shape is not set.");
}
Status ComputeInternal(OpKernelContext* context) const override {
TensorShapeVector shape = shape_;
const Tensor* X = context->Input<Tensor>(0);
const TensorShape& X_shape = X->Shape();
ReshapeHelper helper(X_shape, shape);
Tensor* Y = context->Output(0, TensorShape(shape));
const void* source = X->DataRaw();
void* target = Y->MutableDataRaw();
// If source and target pointers are not equal (non-inplace operation), we need to copy the data.
if (target != source) {
ORT_ENFORCE(context->GetComputeStream());
ORT_RETURN_IF_ERROR(CopyTensor(*X, *Y, *context->GetComputeStream()));
}
return Status::OK();
}
private:
TensorShapeVector shape_;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "resize.h"
namespace onnxruntime {
namespace rocm {
#define REGISTER_KERNEL_TYPED(T) \
ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_EX( \
Resize, \
kOnnxDomain, \
10, 10, \
T, \
kRocmExecutionProvider, \
(*KernelDefBuilder::Create()) \
.InputMemoryType(OrtMemTypeCPUInput, 1) \
.TypeConstraint("T", DataTypeImpl::GetTensorType<T>()), \
Resize<T>); \
ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_EX( \
Resize, \
kOnnxDomain, \
11, 12, \
T, \
kRocmExecutionProvider, \
(*KernelDefBuilder::Create()) \
.InputMemoryType(OrtMemTypeCPUInput, 1) \
.InputMemoryType(OrtMemTypeCPUInput, 2) \
.InputMemoryType(OrtMemTypeCPUInput, 3) \
.TypeConstraint("T1", DataTypeImpl::GetTensorType<T>()), \
Resize<T>); \
ONNX_OPERATOR_TYPED_KERNEL_EX( \
Resize, \
kOnnxDomain, \
13, \
T, \
kRocmExecutionProvider, \
(*KernelDefBuilder::Create()) \
.InputMemoryType(OrtMemTypeCPUInput, 1) \
.InputMemoryType(OrtMemTypeCPUInput, 2) \
.InputMemoryType(OrtMemTypeCPUInput, 3) \
.TypeConstraint("T1", DataTypeImpl::GetTensorType<T>()), \
Resize<T>);
REGISTER_KERNEL_TYPED(float)
REGISTER_KERNEL_TYPED(double)
REGISTER_KERNEL_TYPED(MLFloat16)
REGISTER_KERNEL_TYPED(int32_t)
REGISTER_KERNEL_TYPED(uint8_t)
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "resize.h"
namespace onnxruntime {
namespace cuda {
#define REGISTER_KERNEL_TYPED(T) \
ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_EX( \
Resize, \
kOnnxDomain, \
10, 10, \
T, \
kCudaExecutionProvider, \
(*KernelDefBuilder::Create()) \
.InputMemoryType(OrtMemTypeCPUInput, 1) \
.TypeConstraint("T", DataTypeImpl::GetTensorType<T>()), \
Resize<T>); \
ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_EX( \
Resize, \
kOnnxDomain, \
11, 12, \
T, \
kCudaExecutionProvider, \
(*KernelDefBuilder::Create()) \
.InputMemoryType(OrtMemTypeCPUInput, 1) \
.InputMemoryType(OrtMemTypeCPUInput, 2) \
.InputMemoryType(OrtMemTypeCPUInput, 3) \
.TypeConstraint("T1", DataTypeImpl::GetTensorType<T>()), \
Resize<T>); \
ONNX_OPERATOR_TYPED_KERNEL_EX( \
Resize, \
kOnnxDomain, \
13, \
T, \
kCudaExecutionProvider, \
(*KernelDefBuilder::Create()) \
.InputMemoryType(OrtMemTypeCPUInput, 1) \
.InputMemoryType(OrtMemTypeCPUInput, 2) \
.InputMemoryType(OrtMemTypeCPUInput, 3) \
.TypeConstraint("T1", DataTypeImpl::GetTensorType<T>()), \
Resize<T>);
REGISTER_KERNEL_TYPED(float)
REGISTER_KERNEL_TYPED(double)
REGISTER_KERNEL_TYPED(MLFloat16)
REGISTER_KERNEL_TYPED(int32_t)
REGISTER_KERNEL_TYPED(uint8_t)
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/rocm/tensor/upsample.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
class Resize : public Upsample<T> {
public:
Resize(const OpKernelInfo& info) : Upsample<T>(info) {
}
Status ComputeInternal(OpKernelContext* context) const override {
return Upsample<T>::ComputeInternal(context);
}
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/cuda/tensor/upsample.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
class Resize : public Upsample<T> {
public:
Resize(const OpKernelInfo& info) : Upsample<T>(info) {
}
Status ComputeInternal(OpKernelContext* context) const override {
return Upsample<T>::ComputeInternal(context);
}
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
#include "core/common/common.h"
#include "core/providers/cpu/tensor/upsamplebase.h"
#include "core/providers/rocm/rocm_common.h"
namespace onnxruntime {
namespace rocm {
size_t CalcResizeBufferSize(const onnxruntime::UpsampleMode upsample_mode,
const gsl::span<const int64_t>& output_dims);
template <typename T>
void ResizeImpl(
hipStream_t stream,
const onnxruntime::UpsampleMode upsample_mode,
const int rank,
TArray<int64_t>& input_shape,
TArray<int64_t>& output_shape,
TArray<int64_t>& input_strides,
TArray<fast_divmod>& output_div_pitches,
TArray<float>& scales_vals,
TArray<float, 10>& roi,
const T* input_data,
T* output_data,
const size_t N,
bool extrapolation_enabled,
const T extrapolation_value,
float cubic_coeff_a,
bool exclude_outside,
onnxruntime::ResizeCoordinateTransformationMode coordinate_transform_mode,
onnxruntime::ResizeNearestMode nearest_mode,
void* dims_mapping);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
#include "core/common/common.h"
#include "core/providers/cpu/tensor/upsamplebase.h"
#include "core/providers/cuda/cuda_common.h"
namespace onnxruntime {
namespace cuda {
size_t CalcResizeBufferSize(const onnxruntime::UpsampleMode upsample_mode,
const gsl::span<const int64_t>& output_dims);
template <typename T>
void ResizeImpl(
cudaStream_t stream,
const onnxruntime::UpsampleMode upsample_mode,
const int rank,
TArray<int64_t>& input_shape,
TArray<int64_t>& output_shape,
TArray<int64_t>& input_strides,
TArray<fast_divmod>& output_div_pitches,
TArray<float>& scales_vals,
TArray<float, 10>& roi,
const T* input_data,
T* output_data,
const size_t N,
bool extrapolation_enabled,
const T extrapolation_value,
float cubic_coeff_a,
bool exclude_outside,
onnxruntime::ResizeCoordinateTransformationMode coordinate_transform_mode,
onnxruntime::ResizeNearestMode nearest_mode,
void* dims_mapping);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "reverse_sequence.h"
#include "reverse_sequence_impl.h"
#include "core/providers/cpu/tensor/utils.h"
namespace onnxruntime {
namespace rocm {
ONNX_OPERATOR_KERNEL_EX(
ReverseSequence,
kOnnxDomain,
10,
kRocmExecutionProvider,
(*KernelDefBuilder::Create()).TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()),
ReverseSequenceOp);
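// Dispatch helper: each supported element type is reinterpreted as the integer type of the same
// byte width (e.g. float -> int32_t, MLFloat16 -> int16_t) so a single kernel instantiation per
// width handles all of them.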
#define ReverseSequenceCallCudaImplTypeAs(T, TEqual) \
if (X.IsDataType<T>()) { \
HIP_RETURN_IF_ERROR(ReverseSequenceCudaImpl( \
Stream(context), \
reinterpret_cast<const typename ToHipType<TEqual>::MappedType*>(X.Data<T>()), \
seq_lengths.Data<int64_t>(), \
reinterpret_cast<typename ToHipType<TEqual>::MappedType*>(Y.MutableData<T>()), \
gsl::narrow<int>(batch_size), gsl::narrow<int>(max_seq_len), gsl::narrow<int>(element_size), \
time_major_)); \
return Status::OK(); \
}
Status ReverseSequenceOp::ComputeInternal(OpKernelContext* context) const {
const auto& X = *context->Input<Tensor>(0);
const auto& dims = X.Shape();
const auto batch_size = time_major_ ? dims[1] : dims[0];
const auto max_seq_len = time_major_ ? dims[0] : dims[1];
const auto element_size = dims.SizeFromDimension(2);
const auto& seq_lengths = *context->Input<Tensor>(1);
const auto& seq_len_shape = seq_lengths.Shape();
if (seq_len_shape.NumDimensions() != 1 || seq_len_shape[0] != batch_size) {
return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "sequence_lens shape must be {batch_size}. Got:",
seq_len_shape, ". batch_size=", batch_size);
}
auto& Y = *context->Output(0, dims);
ReverseSequenceCallCudaImplTypeAs(float, int32_t);
ReverseSequenceCallCudaImplTypeAs(int32_t, int32_t);
ReverseSequenceCallCudaImplTypeAs(uint32_t, int32_t);
ReverseSequenceCallCudaImplTypeAs(MLFloat16, int16_t);
ReverseSequenceCallCudaImplTypeAs(int16_t, int16_t);
ReverseSequenceCallCudaImplTypeAs(uint16_t, int16_t);
ReverseSequenceCallCudaImplTypeAs(int8_t, int8_t);
ReverseSequenceCallCudaImplTypeAs(uint8_t, int8_t);
ReverseSequenceCallCudaImplTypeAs(bool, int8_t);
ReverseSequenceCallCudaImplTypeAs(int64_t, int64_t);
ReverseSequenceCallCudaImplTypeAs(double, int64_t);
ReverseSequenceCallCudaImplTypeAs(uint64_t, int64_t);
return ORT_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED,
"Type for ", X.DataType(), " is not supported yet in ReverseSequence.");
}
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "reverse_sequence.h"
#include "reverse_sequence_impl.h"
#include "core/providers/cpu/tensor/utils.h"
namespace onnxruntime {
namespace cuda {
ONNX_OPERATOR_KERNEL_EX(
ReverseSequence,
kOnnxDomain,
10,
kCudaExecutionProvider,
(*KernelDefBuilder::Create()).TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()),
ReverseSequenceOp);
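// Dispatch helper: each supported element type is reinterpreted as the integer type of the same
// byte width (e.g. float -> int32_t, MLFloat16 -> int16_t) so a single kernel instantiation per
// width handles all of them.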
#define ReverseSequenceCallCudaImplTypeAs(T, TEqual) \
if (X.IsDataType<T>()) { \
CUDA_RETURN_IF_ERROR(ReverseSequenceCudaImpl( \
Stream(context), \
reinterpret_cast<const typename ToCudaType<TEqual>::MappedType*>(X.Data<T>()), \
seq_lengths.Data<int64_t>(), \
reinterpret_cast<typename ToCudaType<TEqual>::MappedType*>(Y.MutableData<T>()), \
gsl::narrow<int>(batch_size), gsl::narrow<int>(max_seq_len), gsl::narrow<int>(element_size), \
time_major_)); \
return Status::OK(); \
}
Status ReverseSequenceOp::ComputeInternal(OpKernelContext* context) const {
const auto& X = *context->Input<Tensor>(0);
const auto& dims = X.Shape();
const auto batch_size = time_major_ ? dims[1] : dims[0];
const auto max_seq_len = time_major_ ? dims[0] : dims[1];
const auto element_size = dims.SizeFromDimension(2);
const auto& seq_lengths = *context->Input<Tensor>(1);
const auto& seq_len_shape = seq_lengths.Shape();
if (seq_len_shape.NumDimensions() != 1 || seq_len_shape[0] != batch_size) {
return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "sequence_lens shape must be {batch_size}. Got:",
seq_len_shape, ". batch_size=", batch_size);
}
auto& Y = *context->Output(0, dims);
ReverseSequenceCallCudaImplTypeAs(float, int32_t);
ReverseSequenceCallCudaImplTypeAs(int32_t, int32_t);
ReverseSequenceCallCudaImplTypeAs(uint32_t, int32_t);
ReverseSequenceCallCudaImplTypeAs(MLFloat16, int16_t);
ReverseSequenceCallCudaImplTypeAs(int16_t, int16_t);
ReverseSequenceCallCudaImplTypeAs(uint16_t, int16_t);
ReverseSequenceCallCudaImplTypeAs(int8_t, int8_t);
ReverseSequenceCallCudaImplTypeAs(uint8_t, int8_t);
ReverseSequenceCallCudaImplTypeAs(bool, int8_t);
ReverseSequenceCallCudaImplTypeAs(int64_t, int64_t);
ReverseSequenceCallCudaImplTypeAs(double, int64_t);
ReverseSequenceCallCudaImplTypeAs(uint64_t, int64_t);
return ORT_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED,
"Type for ", X.DataType(), " is not supported yet in ReverseSequence.");
}
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
class ReverseSequenceOp final : public RocmKernel {
public:
ReverseSequenceOp(const OpKernelInfo& info) : RocmKernel(info) {
int64_t batch_axis;
int64_t time_axis;
ORT_ENFORCE(info.GetAttr<int64_t>("batch_axis", &batch_axis).IsOK());
ORT_ENFORCE(info.GetAttr<int64_t>("time_axis", &time_axis).IsOK());
ORT_ENFORCE(batch_axis < 2, "Invalid batch_axis of ", batch_axis, ". Must be 0 or 1");
ORT_ENFORCE(time_axis < 2, "Invalid time_axis of ", time_axis, ". Must be 0 or 1");
ORT_ENFORCE(batch_axis != time_axis,
"time_axis and batch_axis must have different values but both are ", time_axis);
time_major_ = time_axis == 0;
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
bool time_major_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
class ReverseSequenceOp final : public CudaKernel {
public:
ReverseSequenceOp(const OpKernelInfo& info) : CudaKernel(info) {
int64_t batch_axis;
int64_t time_axis;
ORT_ENFORCE(info.GetAttr<int64_t>("batch_axis", &batch_axis).IsOK());
ORT_ENFORCE(info.GetAttr<int64_t>("time_axis", &time_axis).IsOK());
ORT_ENFORCE(batch_axis < 2, "Invalid batch_axis of ", batch_axis, ". Must be 0 or 1");
ORT_ENFORCE(time_axis < 2, "Invalid time_axis of ", time_axis, ". Must be 0 or 1");
ORT_ENFORCE(batch_axis != time_axis,
"time_axis and batch_axis must have different values but both are ", time_axis);
time_major_ = time_axis == 0;
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
bool time_major_;
};
} // namespace cuda
} // namespace onnxruntime
### |