#include "lietorch_gpu.h" #include #include "common.h" #include "dispatch.h" #include "so3.h" #include "rxso3.h" #include "se3.h" #include "sim3.h" #define GPU_1D_KERNEL_LOOP(i, n) \ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i __global__ void exp_forward_kernel(const scalar_t* a_ptr, scalar_t* X_ptr, int num_threads) { // exponential map forward kernel using Tangent = Eigen::Matrix; using Data = Eigen::Matrix; GPU_1D_KERNEL_LOOP(i, num_threads) { Tangent a(a_ptr + i*Group::K); Eigen::Map(X_ptr + i*Group::N) = Group::Exp(a).data(); } } template __global__ void exp_backward_kernel(const scalar_t* grad, const scalar_t* a_ptr, scalar_t* da, int num_threads) { // exponential map backward kernel using Tangent = Eigen::Matrix; using Grad = Eigen::Matrix; using Data = Eigen::Matrix; GPU_1D_KERNEL_LOOP(i, num_threads) { Tangent a(a_ptr + i*Group::K); Grad dX(grad + i*Group::N); Eigen::Map(da + i*Group::K) = dX * Group::left_jacobian(a); } } template __global__ void log_forward_kernel(const scalar_t* X_ptr, scalar_t* a_ptr, int num_threads) { // logarithm map forward kernel using Tangent = Eigen::Matrix; using Data = Eigen::Matrix; GPU_1D_KERNEL_LOOP(i, num_threads) { Tangent a = Group(X_ptr + i*Group::N).Log(); Eigen::Map(a_ptr + i*Group::K) = a; } } template __global__ void log_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, scalar_t* dX, int num_threads) { // logarithm map backward kernel using Tangent = Eigen::Matrix; using Grad = Eigen::Matrix; using Data = Eigen::Matrix; GPU_1D_KERNEL_LOOP(i, num_threads) { Tangent a = Group(X_ptr + i*Group::N).Log(); Grad da(grad + i*Group::K); Eigen::Map(dX + i*Group::N) = da * Group::left_jacobian_inverse(a); } } template __global__ void inv_forward_kernel(const scalar_t* X_ptr, scalar_t* Y_ptr, int num_threads) { // group inverse forward kernel using Tangent = Eigen::Matrix; using Data = Eigen::Matrix; GPU_1D_KERNEL_LOOP(i, num_threads) { Group X(X_ptr + i*Group::N); Eigen::Map(Y_ptr + i*Group::N) = X.inv().data(); } } template __global__ void inv_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, scalar_t *dX, int num_threads) { // group inverse backward kernel using Tangent = Eigen::Matrix; using Grad = Eigen::Matrix; using Data = Eigen::Matrix; GPU_1D_KERNEL_LOOP(i, num_threads) { Group Y = Group(X_ptr + i*Group::N).inv(); Grad dY(grad + i*Group::N); Eigen::Map(dX + i*Group::N) = -dY * Y.Adj(); } } template __global__ void mul_forward_kernel(const scalar_t* X_ptr, const scalar_t* Y_ptr, scalar_t* Z_ptr, int num_threads) { // group multiplication forward kernel using Tangent = Eigen::Matrix; using Data = Eigen::Matrix; GPU_1D_KERNEL_LOOP(i, num_threads) { Group Z = Group(X_ptr + i*Group::N) * Group(Y_ptr + i*Group::N); Eigen::Map(Z_ptr + i*Group::N) = Z.data(); } } template __global__ void mul_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, const scalar_t* Y_ptr, scalar_t* dX, scalar_t* dY, int num_threads) { // group multiplication backward kernel using Tangent = Eigen::Matrix; using Grad = Eigen::Matrix; using Data = Eigen::Matrix; GPU_1D_KERNEL_LOOP(i, num_threads) { Grad dZ(grad + i*Group::N); Group X(X_ptr + i*Group::N); Eigen::Map(dX + i*Group::N) = dZ; Eigen::Map(dY + i*Group::N) = dZ * X.Adj(); } } template __global__ void adj_forward_kernel(const scalar_t* X_ptr, const scalar_t* a_ptr, scalar_t* b_ptr, int num_threads) { // adjoint forward kernel using Tangent = Eigen::Matrix; using Data = Eigen::Matrix; GPU_1D_KERNEL_LOOP(i, num_threads) { Group X(X_ptr + i*Group::N); Tangent a(a_ptr + i*Group::K); Eigen::Map(b_ptr + i*Group::K) = X.Adj(a); } } template __global__ void adj_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, const scalar_t* a_ptr, scalar_t* dX, scalar_t* da, int num_threads) { // adjoint backward kernel using Tangent = Eigen::Matrix; using Grad = Eigen::Matrix; using Data = Eigen::Matrix; GPU_1D_KERNEL_LOOP(i, num_threads) { Group X(X_ptr + i*Group::N); Grad db(grad + i*Group::K); Tangent a(a_ptr + i*Group::K); Tangent b = X.Adj() * a; Eigen::Map(da + i*Group::K) = db * X.Adj(); Eigen::Map(dX + i*Group::N) = -db * Group::adj(b); } } template __global__ void adjT_forward_kernel(const scalar_t* X_ptr, const scalar_t* a_ptr, scalar_t* b_ptr, int num_threads) { // adjoint forward kernel using Tangent = Eigen::Matrix; using Data = Eigen::Matrix; GPU_1D_KERNEL_LOOP(i, num_threads) { Group X(X_ptr + i*Group::N); Tangent a(a_ptr + i*Group::K); Eigen::Map(b_ptr + i*Group::K) = X.AdjT(a); } } template __global__ void adjT_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, const scalar_t* a_ptr, scalar_t* dX, scalar_t* da, int num_threads) { // adjoint backward kernel using Tangent = Eigen::Matrix; using Grad = Eigen::Matrix; using Data = Eigen::Matrix; GPU_1D_KERNEL_LOOP(i, num_threads) { Group X(X_ptr + i*Group::N); Tangent db(grad + i*Group::K); Grad a(a_ptr + i*Group::K); Eigen::Map(da + i*Group::K) = X.Adj(db); Eigen::Map(dX + i*Group::N) = -a * Group::adj(X.Adj(db)); } } template __global__ void act_forward_kernel(const scalar_t* X_ptr, const scalar_t* p_ptr, scalar_t* q_ptr, int num_threads) { // action on point forward kernel using Tangent = Eigen::Matrix; using Data = Eigen::Matrix; using Point = Eigen::Matrix; GPU_1D_KERNEL_LOOP(i, num_threads) { Group X(X_ptr + i*Group::N); Point p(p_ptr + i*3); Eigen::Map(q_ptr + i*3) = X * p; } } template __global__ void act_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, const scalar_t* p_ptr, scalar_t* dX, scalar_t* dp, int num_threads) { // adjoint backward kernel using Tangent = Eigen::Matrix; using Grad = Eigen::Matrix; using Point = Eigen::Matrix; using PointGrad = Eigen::Matrix; using Transformation = Eigen::Matrix; GPU_1D_KERNEL_LOOP(i, num_threads) { Group X(X_ptr + i*Group::N); Point p(p_ptr + i*3); PointGrad dq(grad + i*3); Eigen::Map(dp + i*3) = dq * X.Matrix4x4().block<3,3>(0,0); Eigen::Map(dX + i*Group::N) = dq * Group::act_jacobian(X*p); } } template __global__ void act4_forward_kernel(const scalar_t* X_ptr, const scalar_t* p_ptr, scalar_t* q_ptr, int num_threads) { // action on point forward kernel using Tangent = Eigen::Matrix; using Data = Eigen::Matrix; using Point = Eigen::Matrix; GPU_1D_KERNEL_LOOP(i, num_threads) { Group X(X_ptr + i*Group::N); Point p(p_ptr + i*4); Eigen::Map(q_ptr + i*4) = X.act4(p); } } template __global__ void act4_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, const scalar_t* p_ptr, scalar_t* dX, scalar_t* dp, int num_threads) { // adjoint backward kernel using Tangent = Eigen::Matrix; using Grad = Eigen::Matrix; using Point = Eigen::Matrix; using PointGrad = Eigen::Matrix; using Transformation = Eigen::Matrix; GPU_1D_KERNEL_LOOP(i, num_threads) { Group X(X_ptr + i*Group::N); Point p(p_ptr + i*4); PointGrad dq(grad + i*4); Eigen::Map(dp + i*4) = dq * X.Matrix4x4(); const Point q = X.act4(p); Eigen::Map(dX + i*Group::N) = dq * Group::act4_jacobian(q); } } template __global__ void as_matrix_forward_kernel(const scalar_t* X_ptr, scalar_t* T_ptr, int num_threads) { // convert to 4x4 matrix representation using Tangent = Eigen::Matrix; using Data = Eigen::Matrix; using Matrix4 = Eigen::Matrix; GPU_1D_KERNEL_LOOP(i, num_threads) { Group X(X_ptr + i*Group::N); Eigen::Map(T_ptr + i*16) = X.Matrix4x4(); } } template __global__ void orthogonal_projector_kernel(const scalar_t* X_ptr, scalar_t* P_ptr, int num_threads) { // orthogonal projection matrix using Proj = Eigen::Matrix; GPU_1D_KERNEL_LOOP(i, num_threads) { Group X(X_ptr + i*Group::N); Eigen::Map(P_ptr + i*Group::N*Group::N) = X.orthogonal_projector(); } } template __global__ void jleft_forward_kernel(const scalar_t* X_ptr, const scalar_t* a_ptr, scalar_t* b_ptr, int num_threads) { // left jacobian inverse action using Tangent = Eigen::Matrix; using Data = Eigen::Matrix; GPU_1D_KERNEL_LOOP(i, num_threads) { Group X(X_ptr + i*Group::N); Tangent a(a_ptr + i*Group::K); Tangent b = Group::left_jacobian_inverse(X.Log()) * a; Eigen::Map(b_ptr + i*Group::K) = b; } } // unary operations torch::Tensor exp_forward_gpu(int group_id, torch::Tensor a) { int batch_size = a.size(0); torch::Tensor X; DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, a.type(), "exp_forward_kernel", ([&] { X = torch::zeros({batch_size, group_t::N}, a.options()); exp_forward_kernel<<>>( a.data_ptr(), X.data_ptr(), batch_size); })); return X; } std::vector exp_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor a) { int batch_size = a.size(0); torch::Tensor da = torch::zeros(a.sizes(), grad.options()); DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, a.type(), "exp_backward_kernel", ([&] { exp_backward_kernel<<>>( grad.data_ptr(), a.data_ptr(), da.data_ptr(), batch_size); })); return {da}; } torch::Tensor log_forward_gpu(int group_id, torch::Tensor X) { int batch_size = X.size(0); torch::Tensor a; DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "log_forward_kernel", ([&] { a = torch::zeros({batch_size, group_t::K}, X.options()); log_forward_kernel<<>>( X.data_ptr(), a.data_ptr(), batch_size); })); return a; } std::vector log_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X) { int batch_size = X.size(0); torch::Tensor dX = torch::zeros(X.sizes(), grad.options()); DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "log_backward_kernel", ([&] { log_backward_kernel<<>>( grad.data_ptr(), X.data_ptr(), dX.data_ptr(), batch_size); })); return {dX}; } torch::Tensor inv_forward_gpu(int group_id, torch::Tensor X) { int batch_size = X.size(0); torch::Tensor Y = torch::zeros_like(X); DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "inv_forward_kernel", ([&] { inv_forward_kernel<<>>( X.data_ptr(), Y.data_ptr(), batch_size); })); return Y; } std::vector inv_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X) { int batch_size = X.size(0); torch::Tensor dX = torch::zeros(X.sizes(), grad.options()); DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "inv_backward_kernel", ([&] { inv_backward_kernel<<>>( grad.data_ptr(), X.data_ptr(), dX.data_ptr(), batch_size); })); return {dX}; } // binary operations torch::Tensor mul_forward_gpu(int group_id, torch::Tensor X, torch::Tensor Y) { int batch_size = X.size(0); torch::Tensor Z = torch::zeros_like(X); DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "mul_forward_kernel", ([&] { mul_forward_kernel<<>>( X.data_ptr(), Y.data_ptr(), Z.data_ptr(), batch_size); })); return Z; } std::vector mul_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X, torch::Tensor Y) { int batch_size = X.size(0); torch::Tensor dX = torch::zeros(X.sizes(), grad.options()); torch::Tensor dY = torch::zeros(Y.sizes(), grad.options()); DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "mul_backward_kernel", ([&] { mul_backward_kernel<<>>( grad.data_ptr(), X.data_ptr(), Y.data_ptr(), dX.data_ptr(), dY.data_ptr(), batch_size); })); return {dX, dY}; } torch::Tensor adj_forward_gpu(int group_id, torch::Tensor X, torch::Tensor a) { int batch_size = X.size(0); torch::Tensor b = torch::zeros(a.sizes(), a.options()); DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "adj_forward_kernel", ([&] { adj_forward_kernel<<>>( X.data_ptr(), a.data_ptr(), b.data_ptr(), batch_size); })); return b; } std::vector adj_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X, torch::Tensor a) { int batch_size = X.size(0); torch::Tensor dX = torch::zeros(X.sizes(), grad.options()); torch::Tensor da = torch::zeros(a.sizes(), grad.options()); DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "adj_backward_kernel", ([&] { adj_backward_kernel<<>>( grad.data_ptr(), X.data_ptr(), a.data_ptr(), dX.data_ptr(), da.data_ptr(), batch_size); })); return {dX, da}; } torch::Tensor adjT_forward_gpu(int group_id, torch::Tensor X, torch::Tensor a) { int batch_size = X.size(0); torch::Tensor b = torch::zeros(a.sizes(), a.options()); DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "adjT_forward_kernel", ([&] { adjT_forward_kernel<<>>( X.data_ptr(), a.data_ptr(), b.data_ptr(), batch_size); })); return b; } std::vector adjT_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X, torch::Tensor a) { int batch_size = X.size(0); torch::Tensor dX = torch::zeros(X.sizes(), grad.options()); torch::Tensor da = torch::zeros(a.sizes(), grad.options()); DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "adjT_backward_kernel", ([&] { adjT_backward_kernel<<>>( grad.data_ptr(), X.data_ptr(), a.data_ptr(), dX.data_ptr(), da.data_ptr(), batch_size); })); return {dX, da}; } torch::Tensor act_forward_gpu(int group_id, torch::Tensor X, torch::Tensor p) { int batch_size = X.size(0); torch::Tensor q = torch::zeros(p.sizes(), p.options()); DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "act_forward_kernel", ([&] { act_forward_kernel<<>>( X.data_ptr(), p.data_ptr(), q.data_ptr(), batch_size); })); return q; } std::vector act_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X, torch::Tensor p) { int batch_size = X.size(0); torch::Tensor dX = torch::zeros(X.sizes(), grad.options()); torch::Tensor dp = torch::zeros(p.sizes(), grad.options()); DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "act_backward_kernel", ([&] { act_backward_kernel<<>>( grad.data_ptr(), X.data_ptr(), p.data_ptr(), dX.data_ptr(), dp.data_ptr(), batch_size); })); return {dX, dp}; } torch::Tensor act4_forward_gpu(int group_id, torch::Tensor X, torch::Tensor p) { int batch_size = X.size(0); torch::Tensor q = torch::zeros(p.sizes(), p.options()); DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "act4_forward_kernel", ([&] { act4_forward_kernel<<>>( X.data_ptr(), p.data_ptr(), q.data_ptr(), batch_size); })); return q; } std::vector act4_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X, torch::Tensor p) { int batch_size = X.size(0); torch::Tensor dX = torch::zeros(X.sizes(), grad.options()); torch::Tensor dp = torch::zeros(p.sizes(), grad.options()); DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "act4_backward_kernel", ([&] { act4_backward_kernel<<>>( grad.data_ptr(), X.data_ptr(), p.data_ptr(), dX.data_ptr(), dp.data_ptr(), batch_size); })); return {dX, dp}; } torch::Tensor as_matrix_forward_gpu(int group_id, torch::Tensor X) { int batch_size = X.size(0); torch::Tensor T4x4 = torch::zeros({X.size(0), 4, 4}, X.options()); DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "as_matrix_forward_kernel", ([&] { as_matrix_forward_kernel<<>>( X.data_ptr(), T4x4.data_ptr(), batch_size); })); return T4x4; } torch::Tensor orthogonal_projector_gpu(int group_id, torch::Tensor X) { int batch_size = X.size(0); torch::Tensor P; DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "orthogonal_projector_kernel", ([&] { P = torch::zeros({X.size(0), group_t::N, group_t::N}, X.options()); orthogonal_projector_kernel<<>>( X.data_ptr(), P.data_ptr(), batch_size); })); return P; } torch::Tensor jleft_forward_gpu(int group_id, torch::Tensor X, torch::Tensor a) { int batch_size = X.size(0); torch::Tensor b = torch::zeros(a.sizes(), a.options()); DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "jleft_forward_kernel", ([&] { jleft_forward_kernel<<>>( X.data_ptr(), a.data_ptr(), b.data_ptr(), batch_size); })); return b; }