import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
def gather_elements(data, indices, axis=0):
data_swaped = np.swapaxes(data, 0, axis)
index_swaped = np.swapaxes(indices, 0, axis)
gathered = np.choose(index_swaped, data_swaped, mode="wrap")
y = np.swapaxes(gathered, 0, axis)
    return y
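# Quick sanity check (illustrative only, not part of the generated test suite).
# This reference implementation mirrors ONNX GatherElements: for axis=1,
# out[i][j] = data[i][indices[i][j]].
#   >>> gather_elements(np.array([[1, 2], [3, 4]]), np.array([[0, 0], [1, 0]]), axis=1)
#   array([[1, 1],
#          [4, 3]])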
class Gather_elements(RunAll):
@staticmethod
def gather_elements_fp16x16():
def default():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
x2 = np.random.randint(low = 0,high=2, size=(3,3,3)).astype(np.uint32)
y = gather_elements(x1, x2, axis=0)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "gather_elements_default"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(0))",
name= name)
def axis1():
x1 = np.array([[1, 2], [3, 4]], dtype=np.float32)
x2 = np.array([[0, 0], [1, 0]], dtype=np.int32)
y = gather_elements(x1, x2, axis=1)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "gather_elements_axis1"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(1))",
name= name)
def axis2():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
x2 = np.random.randint(low = 0,high=3, size=(3,3,3)).astype(np.uint32)
y = gather_elements(x1, x2, axis=2)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "gather_elements_axis2"
make_test(
                inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(2))",
name= name)
def negative_indices():
x1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
x2 = np.array([[-1, -2, 0], [-2, 0, 0]], dtype=np.int32)
y = gather_elements(x1, x2, axis=0)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "gather_elements_negative_indices"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(0))",
name= name)
default()
axis1()
axis2()
        negative_indices()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
def gather_nd_impl(
data: np.ndarray, indices: np.ndarray, batch_dims: int
) -> np.ndarray:
data_rank = len(data.shape)
assert indices.shape[-1] <= data_rank
batch_dims_shape = []
batch_dims_size = 1
for i in range(batch_dims):
batch_dims_shape.append(indices.shape[i])
batch_dims_size *= indices.shape[i]
output_shape = (
batch_dims_shape + list(indices.shape)[batch_dims:-1]
if (indices.shape[-1] == data_rank - batch_dims)
else batch_dims_shape
+ list(indices.shape)[batch_dims:-1]
+ list(data.shape)[batch_dims + indices.shape[-1] :]
)
output_data_buffer = []
reshaped_indices = indices.reshape(batch_dims_size, -1, indices.shape[-1])
reshaped_data = data.reshape((batch_dims_size,) + data.shape[batch_dims:])
for batch_dim in range(reshaped_indices.shape[0]):
for outer_dim in range(reshaped_indices.shape[1]):
gather_index = tuple(reshaped_indices[batch_dim][outer_dim])
output_data_buffer.append(reshaped_data[(batch_dim, *gather_index)])
    return np.asarray(output_data_buffer, dtype=data.dtype).reshape(output_shape)
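# Worked example (illustrative only): with batch_dims=0 each row of `indices`
# is a full multi-index into `data`, so indices (0,0) and (1,1) pick the two
# diagonal elements below.
#   >>> gather_nd_impl(np.array([[0, 1], [2, 3]]), np.array([[0, 0], [1, 1]]), batch_dims=0)
#   array([0, 3])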
class Gather_nd(RunAll):
@staticmethod
def gather_nd_fp16x16():
def gather_nd_3D():
def default():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
x2 = np.random.randint(low = 0,high=2, size=(3,3,3)).astype(np.uint32)
y = gather_nd_impl(x1, x2, batch_dims=0)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "gather_nd_fp16x16_3d_default"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0))",
name= name)
def batch_dims1():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
x2 = np.random.randint(low = 0,high=2, size=(3,3,2)).astype(np.uint32)
y = gather_nd_impl(x1, x2, batch_dims=1)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "gather_nd_fp16x16_3d_batch_dims1"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1))",
name= name)
def batch_dims2():
x1 = np.arange(0,54).reshape(3,3,3,2).astype(np.int64)
x2 = np.random.randint(low = 0,high=2, size=(3,3,2)).astype(np.uint32)
y = gather_nd_impl(x1, x2, batch_dims=2)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
                x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "gather_nd_fp16x16_3d_batch_dims2"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2))",
name= name)
default()
batch_dims1()
batch_dims2()
gather_nd_3D()
@staticmethod
def gather_nd_fp8x23():
def gather_nd_3D():
def default():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
x2 = np.random.randint(low = 0,high=2, size=(3,3,3)).astype(np.int64)
y = gather_nd_impl(x1, x2, batch_dims=0)
x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
name = "gather_nd_fp8x23_3d_default"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0))",
name= name)
def batch_dims1():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
x2 = np.random.randint(low = 0,high=2, size=(3,3,2)).astype(np.int64)
y = gather_nd_impl(x1, x2, batch_dims=1)
x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
name = "gather_nd_fp8x23_3d_batch_dims1"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1))",
name= name)
            def batch_dims2():
x1 = np.arange(0,54).reshape(3,3,3,2).astype(np.int64)
x2 = np.random.randint(low = 0,high=2, size=(3,3,2)).astype(np.uint32)
y = gather_nd_impl(x1, x2, batch_dims=2)
x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
name = "gather_nd_fp8x23_3d_batch_dims2"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2))",
name= name)
default()
batch_dims1()
batch_dims2()
gather_nd_3D()
@staticmethod
def gather_nd_i8():
def gather_nd_3D():
def default():
x1 = np.arange(0,9).reshape(3,3).astype(np.int8)
x2 = np.random.randint(low = 0,high=2, size=(3,2)).astype(np.int8)
y = gather_nd_impl(x1, x2, batch_dims=0)
x1 = Tensor(Dtype.I8, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "gather_nd_i8_3d_default"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0))",
name= name)
def batch_dims1():
x1 = np.arange(0,9).reshape(3,3).astype(np.int8)
x2 = np.random.randint(low = 0,high=2, size=(3,1)).astype(np.int8)
y = gather_nd_impl(x1, x2, batch_dims=1)
x1 = Tensor(Dtype.I8, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "gather_nd_i8_3 |
d_batch_dims1"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1))",
name= name)
default()
batch_dims1()
gather_nd_3D()
@staticmethod
def gather_nd_i32():
def gather_nd_3D():
def default():
x1 = np.arange(0,24).reshape(4,2,3).astype(np.int32)
x2 = np.random.randint(low = 0,high=2, size=(3,2)).astype(np.int32)
y = gather_nd_impl(x1, x2, batch_dims=0)
x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "gather_nd_i32_3d_default"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0))",
name= name)
def batch_dims1():
x1 = np.arange(0,108).reshape(4,3,3,3).astype(np.int32)
x2 = np.random.randint(low = 0,high=3, size=(4,2,3)).astype(np.uint32)
y = gather_nd_impl(x1, x2, batch_dims=1)
x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "gather_nd_i32_3d_batch_dims1"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1))",
name= name)
def batch_dims2():
x1 = np.arange(0,54).reshape(3,3,3,2).astype(np.int64)
x2 = np.random.randint(low = 0,high=2, size=(3,3,2)).astype(np.uint32)
y = gather_nd_impl(x1, x2, batch_dims=2)
                x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "gather_nd_i32_3d_batch_dims2"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2))",
name= name)
default()
batch_dims1()
batch_dims2()
gather_nd_3D()
@staticmethod
def gather_nd_u32():
def gather_nd_3D():
def default():
x1 = np.arange(0,108).reshape(3,3,4,3).astype(np.int32)
x2 = np.random.randint(low = 0,high=2, size=(3,3,2)).astype(np.uint32)
y = gather_nd_impl(x1, x2, batch_dims=0)
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "gather_nd_u32_default"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0))",
name= name)
def batch_dims1():
x1 = np.arange(0,108).reshape(3,3,4,3).astype(np.int32)
x2 = np.random.randint(low = 0,high=2, size=(3,3,2)).astype(np.uint32)
y = gather_nd_impl(x1, x2, batch_dims=1)
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "gather_nd_u32_batch_dims1"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1))",
name= name)
def batch_dims2():
                x1 = np.arange(0,108).reshape(3,3,4,3).astype(np.int32)
x2 = np.random.randint(low = 0,high=2, size=(3,3,2)).astype(np.uint32)
y = gather_nd_impl(x1, x2, batch_dims=2)
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "gather_nd_u32_batch_dims2"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2))",
name= name)
default()
batch_dims1()
batch_dims2()
        gather_nd_3D()
from typing import Optional
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
def gemm_reference_implementation(
A: np.ndarray,
B: np.ndarray,
C: Optional[np.ndarray] = None,
alpha: float = 1.0,
beta: float = 1.0,
transA: int = 0,
transB: int = 0,
) -> np.ndarray:
A = A if transA == 0 else A.T
B = B if transB == 0 else B.T
C = C if C is not None else np.array(0)
Y = alpha * np.dot(A, B) + beta * C
    return Y
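# Minimal sketch of the semantics (illustrative only): Y = alpha * A' @ B' + beta * C,
# where A'/B' are optionally transposed and C defaults to a zero scalar.
#   >>> gemm_reference_implementation(np.array([[1., 2.], [3., 4.]]), np.eye(2), alpha=0.5)
#   array([[0.5, 1. ],
#          [1.5, 2. ]])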
class Gemm(RunAll):
@staticmethod
def gemm_default_zero_bias():
a = np.random.ranf([3, 5]).astype(np.float32)
b = np.random.ranf([5, 4]).astype(np.float32)
c = np.zeros([1, 4]).astype(np.float32)
y = gemm_reference_implementation(a, b, c)
a = Tensor(Dtype.FP16x16, a.shape, to_fp(
a.flatten(), FixedImpl.FP16x16))
b = Tensor(Dtype.FP16x16, b.shape, to_fp(
b.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "gemm_default_no_bias"
make_test(
[a, b], y, "NNTrait::gemm(input_0, input_1, Option::None(()), Option::None(()), Option::None(()), false, false)", name, Trait.NN)
@staticmethod
def gemm_default_vector_bias():
a = np.random.ranf([2, 7]).astype(np.float32)
b = np.random.ranf([7, 4]).astype(np.float32)
c = np.random.ranf([1, 4]).astype(np.float32)
y = gemm_reference_implementation(a, b, c)
a = Tensor(Dtype.FP16x16, a.shape, to_fp(
a.flatten(), FixedImpl.FP16x16))
b = Tensor(Dtype.FP16x16, b.shape, to_fp(
b.flatten(), FixedImpl.FP16x16))
c = Tensor(Dtype.FP16x16, c.shape, to_fp(
c.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "gemm_default_vector_bias"
make_test(
[a, b, c], y, "NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::None(()), Option::None(()), false, false)", name, Trait.NN)
@staticmethod
def gemm_default_matrix_bias():
a = np.random.ranf([3, 6]).astype(np.float32)
b = np.random.ranf([6, 4]).astype(np.float32)
c = np.random.ranf([3, 4]).astype(np.float32)
y = gemm_reference_implementation(a, b, c)
a = Tensor(Dtype.FP16x16, a.shape, to_fp(
a.flatten(), FixedImpl.FP16x16))
        b = Tensor(Dtype.FP16x16, b.shape, to_fp(
            b.flatten(), FixedImpl.FP16x16))
c = Tensor(Dtype.FP16x16, c.shape, to_fp(
c.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "gemm_default_matrix_bias"
make_test(
[a, b, c], y, "NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::None(()), Option::None(()), false, false)", name, Trait.NN)
@staticmethod
def gemm_transposeA():
a = np.random.ranf([6, 3]).astype(np.float32)
b = np.random.ranf([6, 4]).astype(np.float32)
c = np.zeros([1, 4]).astype(np.float32)
y = gemm_reference_implementation(a, b, c, transA=1)
a = Tensor(Dtype.FP16x16, a.shape, to_fp(
a.flatten(), FixedImpl.FP16x16))
b = Tensor(Dtype.FP16x16, b.shape, to_fp(
b.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "gemm_transposeA"
make_test(
[a, b], y, "NNTrait::gemm(input_0, input_1, Option::None(()), Option::None(()), Option::None(()), true, false)", name, Trait.NN)
@staticmethod
def gemm_transposeB():
a = np.random.ranf([3, 6]).astype(np.float32)
b = np.random.ranf([4, 6]).astype(np.float32)
c = np.zeros([1, 4]).astype(np.float32)
y = gemm_reference_implementation(a, b, c, transB=1)
a = Tensor(Dtype.FP16x16, a.shape, to_fp(
a.flatten(), FixedImpl.FP16x16))
b = Tensor(Dtype.FP16x16, b.shape, to_fp(
b.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "gemm_transposeB"
make_test(
[a, b], y, "NNTrait::gemm(input_0, input_1, Option::None(()), Option::None(()), Option::None(()), false, true)", name, Trait.NN)
@staticmethod
def gemm_alpha():
a = np.random.ranf([3, 5]).astype(np.float32)
        b = np.random.ranf([5, 4]).astype(np.float32)
c = np.zeros([1, 4]).astype(np.float32)
y = gemm_reference_implementation(a, b, c, alpha=0.5)
a = Tensor(Dtype.FP16x16, a.shape, to_fp(
a.flatten(), FixedImpl.FP16x16))
b = Tensor(Dtype.FP16x16, b.shape, to_fp(
b.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "gemm_alpha"
make_test(
[a, b], y, "NNTrait::gemm(input_0, input_1, Option::None(()), Option::Some(FixedTrait::new(32768, false)), Option::None(()), false, false)", name, Trait.NN)
@staticmethod
def gemm_beta():
a = np.random.ranf([2, 7]).astype(np.float32)
b = np.random.ranf([7, 4]).astype(np.float32)
c = np.random.ranf([1, 4]).astype(np.float32)
y = gemm_reference_implementation(a, b, c, beta=0.5)
a = Tensor(Dtype.FP16x16, a.shape, to_fp(
a.flatten(), FixedImpl.FP16x16))
b = Tensor(Dtype.FP16x16, b.shape, to_fp(
b.flatten(), FixedImpl.FP16x16))
c = Tensor(Dtype.FP16x16, c.shape, to_fp(
c.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "gemm_beta"
make_test(
[a, b, c], y, "NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::None(()), Option::Some(FixedTrait::new(32768, false)), false, false)", name, Trait.NN)
@staticmethod
def gemm_all_attributes():
a = np.random.ranf([4, 3]).astype(np.float32)
b = np.random.ranf([5, 4]).astype(np.float32)
c = np.random.ranf([1, 5]).astype(np.float32)
y = gemm_reference_implementation(
a, b, c, transA=1, transB=1, alpha=0.25, beta=0.35
)
a = Tensor(Dtype.FP16x16, a.shape, to_fp(
a.flatten(), FixedImpl.FP16x16))
b = Tensor(Dtype.FP16x16, b.shape, to_fp(
            b.flatten(), FixedImpl.FP16x16))
c = Tensor(Dtype.FP16x16, c.shape, to_fp(
c.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "gemm_all_attributes"
make_test(
            [a, b, c], y, "NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::Some(FixedTrait::new(16384, false)), Option::Some(FixedTrait::new(22938, false)), true, true)", name, Trait.NN)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Greater(RunAll):
@staticmethod
def greater_u32():
def default():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
z = np.greater(x, y)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "greater_u32"
make_test([x, y], z, "input_0.greater(@input_1)", name)
def broadcast():
x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
y = np.random.randint(0, 6, (1, 2)).astype(np.uint32)
z = np.greater(x, y)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "greater_u32_broadcast"
make_test([x, y], z, "input_0.greater(@input_1)", name)
default()
broadcast()
@staticmethod
def greater_i32():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
z = np.greater(x, y)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "greater_i32"
make_test([x, y], z, "input_0.greater(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.int32)
y = np.random.randint(-3, 3, (1, 2)).astype(np.int32)
z = np.greater(x, y)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "greater_i32_broadcast"
make_test([x, y], z, "input_0.greater(@input_1)", name)
default()
        broadcast()
@staticmethod
def greater_i8():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
z = np.greater(x, y)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "greater_i8"
make_test([x, y], z, "input_0.greater(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.int8)
y = np.random.randint(-3, 3, (1, 2)).astype(np.int8)
z = np.greater(x, y)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "greater_i8_broadcast"
make_test([x, y], z, "input_0.greater(@input_1)", name)
default()
broadcast()
@staticmethod
def greater_fp8x23():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.greater(x, y)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "greater_fp8x23"
make_test([x, y], z, "input_0.greater(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
z = np.greater(x, y)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "greater_fp8x23_broadcast"
make_test([x, y], z, "input_0.greater(@input_1)", name)
default()
broadcast()
@staticmethod
def greater_fp16x16():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.greater(x, y)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "greater_fp16x16"
make_test([x, y], z, "input_0.greater(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
z = np.greater(x, y)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "greater_fp16x16_broadcast"
make_test([x, y], z, "input_0.greater(@input_1)", name)
default()
        broadcast()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Greater_equal(RunAll):
@staticmethod
def greater_equal_u32():
def default():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
z = np.greater_equal(x, y)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "greater_equal_u32"
make_test([x, y], z, "input_0.greater_equal(@input_1)", name)
def broadcast():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 6, (1, 3, 1)).astype(np.uint32)
z = np.greater_equal(x, y)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "greater_equal_u32_broadcast"
make_test([x, y], z, "input_0.greater_equal(@input_1)", name)
default()
broadcast()
@staticmethod
def greater_equal_i32():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
z = np.greater_equal(x, y)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "greater_equal_i32"
make_test([x, y], z, "input_0.greater_equal(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.int32)
z = np.greater_equal(x, y)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "greater_equal_i32_broadcast" |
make_test([x, y], z, "input_0.greater_equal(@input_1)", name)
default()
broadcast()
@staticmethod
def greater_equal_i8():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
z = np.greater_equal(x, y)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "greater_equal_i8"
make_test([x, y], z, "input_0.greater_equal(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.int8)
z = np.greater_equal(x, y)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "greater_equal_i8_broadcast"
make_test([x, y], z, "input_0.greater_equal(@input_1)", name)
default()
broadcast()
@staticmethod
def greater_equal_fp8x23():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.greater_equal(x, y)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "greater_equal_fp8x23"
make_test([x, y], z, "input_0.greater_equal(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.float64)
z = np.greater_equal(x, y)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(
                x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "greater_equal_fp8x23_broadcast"
make_test([x, y], z, "input_0.greater_equal(@input_1)", name)
default()
broadcast()
@staticmethod
def greater_equal_fp16x16():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.greater_equal(x, y)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "greater_equal_fp16x16"
make_test([x, y], z, "input_0.greater_equal(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.float64)
z = np.greater_equal(x, y)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "greater_equal_fp16x16_broadcast"
make_test([x, y], z, "input_0.greater_equal(@input_1)", name)
default()
        broadcast()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
from .resize import _get_all_coords
import numbers
from typing import List
def grid_sample(X, grid, mode='linear', padding_mode='zeros', align_corners=0):
x_dims = X.shape
grid_dims = grid.shape
N = x_dims[0]
C = x_dims[1]
y_dims = (N, C, *grid_dims[1:-1])
if np.prod(y_dims) == 0:
return np.array([], dtype=X.dtype)
Y = np.empty(y_dims, dtype=X.dtype)
for n in range(N):
grid_data = grid[n]
for c in range(C):
X_data = X[n, c]
num_dims = len(x_dims[2:])
dims = x_dims[2:]
border = _prepare_border(dims, align_corners=align_corners)
for ox in _get_all_coords(Y[n, c]):
nx = grid_data[tuple(ox)]
nx = nx[::-1]
x = _gs_denormalize_coordinates(
n=nx, dims=dims, align_corners=align_corners
)
if mode == "nearest":
x = np.rint(x)
for i, v in enumerate(x):
x_min = border[i]
x_max = border[i + num_dims]
if v < x_min or v > x_max:
if padding_mode == "border":
x[i] = _clamp(v, 0, dims[i] - 1)
elif padding_mode == "reflection":
x[i] = _gs_reflect(v, x_min, x_max)
if mode == "nearest":
x = x.astype(np.int32)
Y[n][c][tuple(ox)] = _pixel_at_ndarray(
ndarray=X_data,
x=x,
border=border,
padding_mode=padding_mode,
)
elif mode == "linear":
Y[n][c][tuple(ox)] = _gs_linear_interpolation_nd_with_x(
data=X_data, x=x, border=border, padding_mode=padding_mode
)
elif mode == "cubic":
Y[n][c][tuple(ox)] = _gs_cubic_interpolation_nd_with_x(
                        data=X_data, x=x, border=border, padding_mode=padding_mode
)
else:
raise RuntimeError(
"GridSample interpolation only supports nearest, linear, and cubic modes."
)
return (Y.astype(X.dtype),)
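# Notes on the reference implementation above (descriptive only): it returns a
# one-element tuple, which is why the call sites below unwrap it with y[0];
# grid coordinates arrive in (x, y[, z]) order and are reversed (nx[::-1]) to
# match ndarray indexing before being denormalized from [-1, 1] into pixel space.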
def _gs_denormalize(n, length: int, align_corners: bool):
if align_corners:
x = (n + 1) / 2.0 * (length - 1)
else:
x = ((n + 1) * length - 1) / 2.0
return x
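# Worked numbers (illustrative): for length=4, align_corners=True maps
# n=-1 -> 0.0 and n=1 -> 3.0 (corner pixel centers), while align_corners=False
# maps n=-1 -> -0.5 and n=1 -> 3.5 (pixel-edge convention).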
def _gs_denormalize_coordinates(n, dims, align_corners: bool):
x = np.zeros(len(n), dtype=np.float32)
for i, (v, dim) in enumerate(zip(n, dims)):
x[i] = _gs_denormalize(n=v, length=dim, align_corners=align_corners)
return x
def _gs_reflect(x, x_min, x_max):
"""Reflect by the near border till within the borders
Use float for borders to avoid potential issues with integer T
"""
fx = x
rng = x_max - x_min
if fx < x_min:
dx = x_min - fx
n = int(dx / rng)
r = dx - n * rng
if n % 2 == 0:
fx = x_min + r
else:
fx = x_max - r
elif fx > x_max:
dx = fx - x_max
n = int(dx / rng)
r = dx - n * rng
if n % 2 == 0:
fx = x_max - r
else:
fx = x_min + r
return fx
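# Worked numbers (illustrative): with borders (-0.5, 3.5), a coordinate that
# overshoots a border by some amount is folded back by the same amount:
#   _gs_reflect(-0.7, -0.5, 3.5)  ->  -0.3 (up to float rounding)
#   _gs_reflect(4.0, -0.5, 3.5)   ->   3.0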
def _gs_get_cubic_coeffs(x, coeffs):
"""Calculate cubic convolution interpolation coefficients
ROBERT G. KEYS https:
Use float to avoid potential issues with integer.
"""
cubic_alpha = -0.75
x = abs(x)
coeffs[0] = (
(cubic_alpha * (x + 1) - 5 * cubic_alpha) * (x + 1) + 8 * cubic_alpha
) * (x + 1) - 4 * cubic_alpha
coeffs[1] = ((cubic_alpha + 2) * x - (cubic_alpha + 3)) * x * x + 1
coeffs[2] = ((cubic_alpha + 2) * (1 - x) - (cubic_alpha + 3)) * (1 - x) * (
1 - x
) + 1
coeffs[3] = (
(cubic_alpha * (2 - x) - 5 * cubic_alpha) * (2 - x) + 8 * cubic_alpha
) * (2 - x) - 4 * cubic_alpha
def _gs_get_linear_coeffs(x, coeffs):
x = abs(x)
    coeffs[0] = 1 - x
coeffs[1] = x
def _gs_bicubic_interpolate(p, x, y):
v = np.empty((4,), dtype=p.dtype)
coeffs = np.empty((4,), dtype=p.dtype)
_gs_get_cubic_coeffs(x, coeffs)
for i in range(4):
v[i] = coeffs @ p[i, :]
_gs_get_cubic_coeffs(y, coeffs)
return coeffs @ v
def _gs_cubic_interpolation_1d_with_x(data, x, border, padding_mode):
v = np.empty((4,), dtype=data.dtype)
coeffs = np.empty((4,), dtype=data.dtype)
x_0 = int(np.floor(x))
x_1 = x_0 + 1
x_2 = x_0 + 2
x_minus_1 = x_0 - 1
_gs_get_cubic_coeffs(x - x_0, coeffs)
v[0] = _pixel_at_array(
array=data, i=x_minus_1, border=border, padding_mode=padding_mode
)
v[1] = _pixel_at_array(
array=data, i=x_0, border=border, padding_mode=padding_mode
)
v[2] = _pixel_at_array(
array=data, i=x_1, border=border, padding_mode=padding_mode
)
v[3] = _pixel_at_array(
array=data, i=x_2, border=border, padding_mode=padding_mode
)
return coeffs @ v
def _gs_linear_interpolation_1d_with_x(data, x, border, padding_mode):
v = np.empty((2,), dtype=data.dtype)
coeffs = np.empty((2,), dtype=data.dtype)
x_0 = int(np.floor(x))
x_1 = x_0 + 1
_gs_get_linear_coeffs(x - x_0, coeffs)
v[0] = _pixel_at_array(
array=data, i=x_0, border=border, padding_mode=padding_mode
)
v[1] = _pixel_at_array(
array=data, i=x_1, border=border, padding_mode=padding_mode
)
return coeffs @ v
def _gs_linear_interpolation_nd_with_x(data, x, border, padding_mode):
num_dims = data.ndim
assert num_dims == len(x) == int(len(border) / 2)
if num_dims == 1:
return _gs_linear_interpolation_1d_with_x(
data=data, x=x[0], border=border, padding_mode=padding_mode
)
res1d = []
for i in range(data.shape[0]):
r = _gs_linear_interpolation_nd_with_x(
data=data[i],
x=x[1:],
border=list(border[1:num_dims])
            + list(border[1 + num_dims : 2 * num_dims]),
padding_mode=padding_mode,
)
res1d.append(r)
res1d = np.array(res1d)
return _gs_linear_interpolation_1d_with_x(
data=res1d,
x=x[0],
border=[border[0], border[num_dims]],
padding_mode=padding_mode,
)
def _gs_cubic_interpolation_nd_with_x(data, x, border, padding_mode):
num_dims = data.ndim
assert num_dims == len(x) == int(len(border) / 2)
if num_dims == 1:
return _gs_cubic_interpolation_1d_with_x(
data=data, x=x[0], border=border, padding_mode=padding_mode
)
res1d = []
for i in range(data.shape[0]):
r = _gs_cubic_interpolation_nd_with_x(
data=data[i],
x=x[1:],
border=list(border[1:num_dims])
+ list(border[1 + num_dims : 2 * num_dims]),
padding_mode=padding_mode,
)
res1d.append(r)
res1d = np.array(res1d)
return _gs_cubic_interpolation_1d_with_x(
data=res1d,
x=x[0],
border=[border[0], border[num_dims]],
padding_mode=padding_mode,
)
def _clamp(val, lo, hi):
if val < lo:
return lo
if val > hi:
return hi
return val
def _pixel_at_ndarray(ndarray, x: List, border, padding_mode):
num_dims = ndarray.ndim
assert num_dims == len(x) == int(len(border) / 2)
if num_dims == 1:
return _pixel_at_array(
array=ndarray, i=x[0], border=border, padding_mode=padding_mode
)
i = x[0]
d = ndarray.shape[0]
if padding_mode == "zeros":
if i >= 0 and i < d:
ndarray = ndarray[i]
else:
i = 0
ndarray = np.zeros_like(ndarray[i])
elif padding_mode == "border":
i = _clamp(i, 0, d - 1)
ndarray = ndarray[i]
else:
i = int(_gs_reflect(i, border[0], border[num_dims]))
ndarray = ndarray[i]
return _pixel_at_ndarray(
ndarray=ndarray,
x=x[1:],
        border=list(border[1:num_dims]) + list(border[1 + num_dims : 2 * num_dims]),
padding_mode=padding_mode,
)
def _pixel_at_array(array, i: int, border, padding_mode):
assert array.ndim == 1
d = array.shape[0]
if padding_mode == "zeros":
if i >= 0 and i < d:
pixel = array[i]
else:
pixel = 0
elif padding_mode == "border":
i = _clamp(i, 0, d - 1)
pixel = array[i]
else:
i = int(_gs_reflect(i, border[0], border[1]))
pixel = array[i]
return pixel
def _prepare_border(dims, align_corners: bool):
num_dims = len(dims)
borders = np.zeros(num_dims * 2)
for i in range(num_dims):
borders[i] = -0.5
borders[i + num_dims] = dims[i] - 0.5
if align_corners:
borders[i] = 0.0
borders[i + num_dims] = dims[i] - 1.0
    return borders
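# Layout note (illustrative): the first len(dims) entries are per-axis minima
# and the next len(dims) entries are per-axis maxima, e.g.
# _prepare_border((4, 4), align_corners=False) -> [-0.5, -0.5, 3.5, 3.5]
# and  _prepare_border((4, 4), align_corners=True) -> [0.0, 0.0, 3.0, 3.0].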
class Grid_sample(RunAll):
@staticmethod
def export_gridsample() -> None:
x = np.array(
[
[
[
[0.0, 1.0, 2.0, 3.0],
[4.0, 5.0, 6.0, 7.0],
[8.0, 9.0, 10.0, 11.0],
[12.0, 13.0, 14.0, 15.0],
]
]
],
dtype=np.float32,
)
grid = np.array(
[
[
[
[-1.0000, -1.0000],
[-0.6000, -1.0000],
[-0.2000, -1.0000],
[0.2000, -1.0000],
[0.6000, -1.0000],
[1.0000, -1.0000],
],
[
[-1.0000, -0.6000],
[-0.6000, -0.6000],
[-0.2000, -0.6000],
[0.2000, -0.6000],
[0.6000, -0.6000],
[1.0000, -0.6000],
],
[
[-1.0000, -0.2000],
[-0.6000, -0.2000],
[-0.2000, -0.2000],
[0.2000, -0.2000],
[0.6000, -0.2000],
[1.0000, -0.2000],
],
[
[-1.0000, 0.2000],
[-0.6000, 0.2000],
[-0.2000, 0.2000],
[0.2000, 0.2000],
[0.6000, 0.2000],
[1.0000, 0.2000],
],
[
[-1.0000, 0.6000],
[-0.6000, 0.6000],
[-0.2000, 0.6000],
[0.2000, 0.6000],
[0.6000, 0.6000],
[1.0000, 0.6000],
                    ],
[
[-1.0000, 1.0000],
[-0.6000, 1.0000],
[-0.2000, 1.0000],
[0.2000, 1.0000],
[0.6000, 1.0000],
[1.0000, 1.0000],
],
]
],
dtype=np.float32,
)
y = grid_sample(x, grid, mode ="linear")
y = np.array(y[0])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "grid_sample"
func_sig = "NNTrait::grid_sample("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None)"
make_test(
[x, grid], y, func_sig, name, Trait.NN)
@staticmethod
def export_gridsample_paddingmode_zeros() -> None:
x = np.array(
[[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],
dtype=np.float32,
)
grid = np.array(
[
[
[
[-10.0000, -10.0000],
[-5.0000, -5.0000],
[-0.2000, -0.2000],
[10.0000, 10.0000],
],
[
[10.0000, 10.0000],
[-0.2000, -0.2000],
[5.0000, 5.0000],
[10.0000, 10.0000],
],
]
],
dtype=np.float32,
)
y = grid_sample(x, grid, mode ="linear")
y = np.array(y[0])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "grid_sample_padding_zeros"
func_sig = "NNTrait::grid_sample("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None)"
make_test(
[x, grid], y, func_sig, name, Trait.NN)
@staticmethod
def export_gridsample_paddingmode_border() -> None:
x = np.array(
[[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],
dtype=np.float32,
)
grid = np.array(
[
[
[
[-10.0000, -10.0000],
[-5.0000, -5.0000],
[-0.2000, -0.2000],
[10.0000, 10.0000],
],
[
[10.0000, 10.0000],
[-0.2000, -0.2000],
[5.0000, 5.0000],
[10.0000, 10.0000],
],
]
],
dtype=np.float32,
)
y = grid_sample(x, grid, mode ="linear", padding_mode="border")
y = np.array(y[0])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "grid_sample_padding_border"
func_sig = "NNTrait::grid_sample("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::Some(PADDING_MODE::BORDER))"
make_test(
[x, grid], y, func_sig, name, Trait.NN)
@staticmethod
def export_gridsample_paddingmode_reflection() -> None:
x = np.array(
            [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],
dtype=np.float32,
)
grid = np.array(
[
[
[
[-10.0000, -10.0000],
[-5.0000, -5.0000],
[-0.2000, -0.2000],
[10.0000, 10.0000],
],
[
[10.0000, 10.0000],
[-0.2000, -0.2000],
[5.0000, 5.0000],
[10.0000, 10.0000],
],
]
],
dtype=np.float32,
)
y = grid_sample(x, grid, mode ="linear", padding_mode="reflection")
y = np.array(y[0])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "grid_sample_padding_reflection"
func_sig = "NNTrait::grid_sample("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::Some(PADDING_MODE::REFLECTION))"
make_test(
[x, grid], y, func_sig, name, Trait.NN)
@staticmethod
def export_gridsample_mode_aligncorners() -> None:
x = np.array(
[[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],
dtype=np.float32,
)
grid = np.array(
[
[
[
[-1.0000, -1.0000],
[-0.5000, -0.5000],
[-0.2000, -0.2000],
[0.0000, 0.0000],
],
[
[0.0000, 0.0000],
[-0.2000, -0.2000],
[0.5000, 0.5000],
                    [1.0000, 1.0000],
],
]
],
dtype=np.float32,
)
y = grid_sample(x, grid, mode ="linear", align_corners=1)
y = np.array(y[0])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "grid_sample_aligncorners"
func_sig = "NNTrait::grid_sample("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::Some(1),"
func_sig += "Option::None,"
func_sig += "Option::None)"
make_test(
[x, grid], y, func_sig, name, Trait.NN)
@staticmethod
def export_gridsample_nearest() -> None:
x = np.array(
[[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],
dtype=np.float32,
)
grid = np.array(
[
[
[
[-1.0000, -1.0000],
[-0.5000, -0.5000],
[-0.2000, -0.2000],
[0.0000, 0.0000],
],
[
[0.0000, 0.0000],
[-0.2000, -0.2000],
[0.5000, 0.5000],
[1.0000, 1.0000],
],
]
],
dtype=np.float32,
)
y = grid_sample(x, grid, mode ="nearest", align_corners=0)
y = np.array(y[0])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "grid_sample_nearest"
func_sig = "NNTrait::grid_sample("
func_sig += "@input_0,"
func_sig += "@inpu |
t_1,"
func_sig += "Option::Some(0),"
func_sig += "Option::Some(MODE::NEAREST),"
func_sig += "Option::None)"
make_test(
[x, grid], y, func_sig, name, Trait.NN)
@staticmethod
def export_gridsample_nearest_align_corner() -> None:
x = np.array(
[[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],
dtype=np.float32,
)
grid = np.array(
[
[
[
[-1.0000, -1.0000],
[-0.5000, -0.5000],
[-0.2000, -0.2000],
[0.0000, 0.0000],
],
[
[0.0000, 0.0000],
[-0.2000, -0.2000],
[0.5000, 0.5000],
[1.0000, 1.0000],
],
]
],
dtype=np.float32,
)
y = grid_sample(x, grid, mode ="nearest", align_corners=1)
y = np.array(y[0])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "grid_sample_nearest_aligncorner"
func_sig = "NNTrait::grid_sample("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::Some(1),"
func_sig += "Option::Some(MODE::NEAREST),"
func_sig += "Option::None)"
make_test(
[x, grid], y, func_sig, name, Trait.NN)
@staticmethod
def export_gridsample_cubic() -> None:
x = np.array(
[[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],
dtype=np.float32,
)
grid = np.array(
[
[
[
[-1.0000, -1.0000],
                    [-0.5000, -0.5000],
[-0.2000, -0.2000],
[0.0000, 0.0000],
],
[
[0.0000, 0.0000],
[-0.2000, -0.2000],
[0.5000, 0.5000],
[1.0000, 1.0000],
],
]
],
dtype=np.float32,
)
y = grid_sample(x, grid, mode ="cubic", align_corners=0)
y = np.array(y[0])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "grid_sample_cubic"
func_sig = "NNTrait::grid_sample("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::Some(0),"
func_sig += "Option::Some(MODE::CUBIC),"
func_sig += "Option::None)"
make_test(
            [x, grid], y, func_sig, name, Trait.NN)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait, get_data_statement
def hamming_window(size, output_datatype=None, periodic=None) -> np.ndarray:
if periodic == 1:
N_1 = size
else:
N_1 = size - 1
ni = np.arange(size, dtype=output_datatype)
alpha = 25.0 / 46.0
beta = 1 - alpha
res = alpha - np.cos(ni * np.float64(np.pi).astype(output_datatype) * 2 / N_1).astype(output_datatype) * beta
    return res.astype(output_datatype)
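# Quick check (illustrative only; periodic=None gives the symmetric window,
# N_1 = size - 1, with alpha = 25/46 per the ONNX HammingWindow definition):
#   >>> hamming_window(4, np.float64)
#   array([0.08695652, 0.77173913, 0.77173913, 0.08695652])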
class Hamming_window(RunAll):
@staticmethod
def fp8x23():
args = [4]
args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP8x23), Dtype.FP8x23)
y = hamming_window(*args, np.float64)
y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
name = "hamming_window_fp8x23"
make_test(
[],
y,
f"TensorTrait::hamming_window({','.join(args_str)}, Option::Some(0))",
name
)
@staticmethod
def fp16x16():
args = [10]
args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP16x16), Dtype.FP16x16)
y = hamming_window(*args, np.float16)
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "hamming_window_fp16x16"
make_test(
[],
y,
f"TensorTrait::hamming_window({','.join(args_str)}, Option::Some(0))",
name
        )
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait, get_data_statement
def hann_window(size, output_datatype=None, periodic=None) -> np.ndarray:
if periodic == 1:
N_1 = size
else:
N_1 = size - 1
ni = np.arange(size, dtype=output_datatype)
res = np.sin((ni * np.float64(np.pi).astype(output_datatype) / N_1).astype(output_datatype)) ** 2
    return res.astype(output_datatype)
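# Quick check (illustrative only): hann_window(4, np.float64) is approximately
# [0.0, 0.75, 0.75, 0.0]; the last entry comes out around 1.5e-32 rather than
# exactly 0 because sin(pi) is not exact in float64.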
class Hann_window(RunAll):
@staticmethod
def fp8x23():
print(get_data_statement(to_fp(np.array([np.pi]).flatten(), FixedImpl.FP8x23), Dtype.FP8x23))
args = [4]
args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP8x23), Dtype.FP8x23)
y = hann_window(*args, np.float64)
print(y)
y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
name = "hann_window_fp8x23"
make_test(
[],
y,
f"TensorTrait::hann_window({','.join(args_str)}, Option::Some(0))",
name
)
@staticmethod
def fp16x16():
print(get_data_statement(to_fp(np.array([np.pi]).flatten(), FixedImpl.FP16x16), Dtype.FP16x16))
args = [10]
args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP16x16), Dtype.FP16x16)
y = hann_window(*args, np.float16)
print(y)
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "hann_window_fp16x16"
make_test(
[],
y,
f"TensorTrait::hann_window({','.join(args_str)}, Option::Some(0))",
name
        )
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
class Hard_sigmoid(RunAll):
@staticmethod
def fp8x23():
alpha = 0.2
beta = 0.5
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float32)
y = np.maximum(0, np.minimum(1, alpha * x + beta))
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "hard_sigmoid_fp8x23"
make_test([x], y, "NNTrait::hard_sigmoid(@input_0, @FixedTrait::new(1677721, false), @FixedTrait::new(4194304, false))",
name, Trait.NN)
@staticmethod
def fp16x16():
alpha = 0.2
beta = 0.5
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float32)
y = np.maximum(0, np.minimum(1, alpha * x + beta))
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "hard_sigmoid_fp16x16"
make_test([x], y, "NNTrait::hard_sigmoid(@input_0, @FixedTrait::new(13107, false), @FixedTrait::new(32768, false))",
name, Trait.NN)
|
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Identity(RunAll):
@staticmethod
def identity_fP8x23():
def identity():
x = np.array([[1, 2], [3, 4]])
y = x
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "identity_fP8x23"
make_test(
[x], y, "input_0.identity()", name)
identity()
@staticmethod
def identity_fP16x16():
def identity():
x = np.array([[1, 2], [3, 4]])
y = x
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "identity_fP16x16"
make_test(
[x], y, "input_0.identity()", name)
identity()
@staticmethod
def identity_i8():
def identity():
x = np.array([[1, 2], [3, 4]])
y = x
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "identity_i8"
make_test(
[x], y, "input_0.identity()", name)
identity()
@staticmethod
def identity_i32():
def identity():
x = np.array([[1, 2], [3, 4]])
y = x
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "identity_i32"
make_test(
[x], y, "input_0.identity()", name)
identity()
@staticmethod
def identity_u32():
def identity():
x = np.array([[1, 2], [3, 4]])
y = x
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "identity_u32"
make_test(
[x], y, "input_0.identity()", name)
        identity()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
INF = 2**32 - 1
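# Note (descriptive, inferred from usage below): these Dtypes carry no native
# IEEE infinity, so the tests use this bit pattern as a sentinel for +inf and
# its negation for -inf.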
class Is_inf(RunAll):
@staticmethod
def is_inf_u32():
def default():
input_0 = np.array([1, 0, INF, 8, -INF, INF], dtype=np.uint32)
output = np.array([False, False, True, False, True, True], dtype=bool)
input_0 = Tensor(Dtype.U32, input_0.shape, input_0.flatten())
output = Tensor(Dtype.BOOL, output.shape, output.flatten())
name = "is_inf_u32"
make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::None, Option::None)", name)
default()
@staticmethod
def is_inf_i32():
def default():
input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int32)
output = np.array([False, False, True, False, True, True], dtype=bool)
input_0 = Tensor(Dtype.I32, input_0.shape, input_0.flatten())
output = Tensor(Dtype.BOOL, output.shape, output.flatten())
name = "is_inf_i32"
make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::None, Option::None)", name)
def positive():
input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int32)
output = np.array([False, False, True, False, False, True], dtype=bool)
input_0 = Tensor(Dtype.I32, input_0.shape, input_0.flatten())
output = Tensor(Dtype.BOOL, output.shape, output.flatten())
name = "is_pos_inf_i32"
make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1))", name)
def negative():
input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int32)
output = np.array([False, False, False, False, True, False], dtype=bool)
input_0 = Tensor(Dtype.I32, input_0.shape, input_0.flatten())
output = Tensor(Dtype.BOOL, output.shape, output.flatten())
name = "is_neg_inf_i32"
            make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0))", name)
default()
positive()
negative()
@staticmethod
def is_inf_i8():
def default():
input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int8)
output = np.array([False, False, True, False, True, True], dtype=bool)
input_0 = Tensor(Dtype.I8, input_0.shape, input_0.flatten())
output = Tensor(Dtype.BOOL, output.shape, output.flatten())
name = "is_inf_i8"
make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::None, Option::None)", name)
def positive():
input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int32)
output = np.array([False, False, True, False, False, True], dtype=bool)
input_0 = Tensor(Dtype.I8, input_0.shape, input_0.flatten())
output = Tensor(Dtype.BOOL, output.shape, output.flatten())
name = "is_pos_inf_i8"
make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1))", name)
def negative():
input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int32)
output = np.array([False, False, False, False, True, False], dtype=bool)
input_0 = Tensor(Dtype.I8, input_0.shape, input_0.flatten())
output = Tensor(Dtype.BOOL, output.shape, output.flatten())
name = "is_neg_inf_i8"
make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0))", name)
default()
positive()
negative()
@staticmethod
def is_inf_fp8x23():
def default():
input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64)
output = np.array([False, False, True, False, True, True], dtype=bool)
input_0 = Tensor(Dtype.FP8x23, input_0.shape, to_fp(
input_0.flatten(), FixedImpl.FP8x23))
            output = Tensor(Dtype.BOOL, output.shape, output.flatten())
name = "is_inf_fp8x23"
make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::None, Option::None)", name)
def positive():
input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64)
output = np.array([False, False, True, False, False, True], dtype=bool)
input_0 = Tensor(Dtype.FP8x23, input_0.shape, input_0.flatten())
output = Tensor(Dtype.BOOL, output.shape, output.flatten())
name = "is_pos_inf_fp8x23"
make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1))", name)
def negative():
input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64)
output = np.array([False, False, False, False, True, False], dtype=bool)
input_0 = Tensor(Dtype.FP8x23, input_0.shape, input_0.flatten())
output = Tensor(Dtype.BOOL, output.shape, output.flatten())
name = "is_neg_inf_fp8x23"
make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0))", name)
default()
positive()
negative()
@staticmethod
def is_inf_fp16x16():
def default():
input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64)
output = np.array([False, False, True, False, True, True], dtype=bool)
input_0 = Tensor(Dtype.FP16x16, input_0.shape, to_fp(
input_0.flatten(), FixedImpl.FP16x16))
output = Tensor(Dtype.BOOL, output.shape, output.flatten())
name = "is_inf_fp16x16"
make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::None, Option::None)", name)
def positive():
input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64)
output = np.array([False, False, True, False, False, True], dtype=bool)
            input_0 = Tensor(Dtype.FP16x16, input_0.shape, input_0.flatten())
output = Tensor(Dtype.BOOL, output.shape, output.flatten())
name = "is_pos_inf_fp16x16"
make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1))", name)
def negative():
input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64)
output = np.array([False, False, False, False, True, False], dtype=bool)
input_0 = Tensor(Dtype.FP16x16, input_0.shape, input_0.flatten())
output = Tensor(Dtype.BOOL, output.shape, output.flatten())
name = "is_neg_inf_fp16x16"
make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0))", name)
default()
positive()
        negative()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
# NaN is represented with -0
NaN = -0
class Is_nan(RunAll):
@staticmethod
def is_nan_fp8x23():
def default():
input_0 = np.array([-1.2, 0, NaN, 2.8, NaN, NaN], dtype=np.float64)
output = np.array([False, False, True, False, True, True], dtype=bool)
input_0 = Tensor(Dtype.FP8x23, input_0.shape, to_fp(
input_0.flatten(), FixedImpl.FP8x23))
output = Tensor(Dtype.BOOL, output.shape, output.flatten())
name = "is_nan_fp8x23"
make_test([input_0], output, "TensorTrait::is_nan(@input_0)", name)
default()
@staticmethod
def is_nan_fp16x16():
def default():
input_0 = np.array([-1.2, 0, NaN, 2.8, NaN, NaN], dtype=np.float64)
output = np.array([False, False, True, False, True, True], dtype=bool)
input_0 = Tensor(Dtype.FP16x16, input_0.shape, to_fp(
input_0.flatten(), FixedImpl.FP16x16))
output = Tensor(Dtype.BOOL, output.shape, output.flatten())
name = "is_nan_fp16x16"
make_test([input_0], output, "TensorTrait::is_nan(@input_0)", name)
default()
|
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
def labelEncoder(
x,
default_float=None,
default_int64=None,
default_string=None,
keys_floats=None,
keys_int64s=None,
keys_strings=None,
values_floats=None,
values_int64s=None,
values_strings=None,
):
keys = keys_floats if keys_floats is not None else (keys_int64s if np.any(keys_int64s) else keys_strings)
values = values_floats if values_floats is not None else (values_int64s if np.any(values_int64s) else values_strings)
classes = dict(zip(keys, values))
if id(keys) == id(keys_floats):
cast = float
elif id(keys) == id(keys_int64s):
cast = int
else:
cast = str
if id(values) == id(values_floats):
defval = default_float
dtype = np.float32
elif id(values) == id(values_int64s):
defval = default_int64
dtype = np.int64
else:
defval = default_string
if not isinstance(defval, str):
defval = ""
dtype = np.str_
shape = x.shape
if len(x.shape) > 1:
x = x.flatten()
res = []
for i in range(0, x.shape[0]):
v = classes.get(cast(x[i]), defval)
res.append(v)
    return np.array(res, dtype=dtype).reshape(shape)
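# Worked example (illustrative only): keys map to values element-wise and
# unmapped entries fall back to the default.
#   >>> labelEncoder(np.array([1, 2, 3]), keys_int64s=np.array([1, 2]),
#   ...              values_int64s=np.array([10, 20]), default_int64=99)
#   array([10, 20, 99])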
class Label_encoder(RunAll):
@staticmethod
def label_encoder_fp16x16():
def labelencoder():
def default():
x = np.array([1, 2, 3, 4, 5, 6, 1, 2, 3]).astype(np.int64)
keys = np.array([1, 2, 5, 6, ]).astype(np.int64)
values = np.array([11, 22, 55, 66]).astype(np.int64)
default = np.array(99).astype(np.int64)
y = labelEncoder(x=x, keys_int64s=keys, values_int64s=values, default_int64=default)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
default = Tensor(Dtype.FP16x16, default.shape, to_fp(default.flatten(), FixedImpl.FP16x16))
keys = Tensor(Dtype.FP16x16, keys.shape, to_fp(keys.flatten(), FixedImpl.FP16x16))
values = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "label_encoder_fp16x16_3d_default"
make_test(
inputs = [x, default, keys, values], output = y, func_sig = """input_0.label_encoder(default_list:Option::None, default_tensor: Option::Some(input_1),
keys:Option::None, keys_tensor: Option::Some(input_2),
values: Option::None, values_tensor: Option::Some(input_3))""",
name= name)
default()
labelencoder()
@staticmethod
def label_encoder_fp8x23():
def label_encoder():
def default():
x = np.array([1, 2, 3, 4, 5, 6, 1, 2, 3, 7, 8]).astype(np.int64)
keys = np.array([1, 2, 5, 6, 7]).astype(np.int64)
values = np.array([11, 22, 55, 66, 77]).astype(np.int64)
default = np.array(99).astype(np.int64)
                y = labelEncoder(x=x, keys_int64s=keys, values_int64s=values, default_int64=default)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
default = Tensor(Dtype.FP8x23, default.shape, to_fp(default.flatten(), FixedImpl.FP8x23))
keys = Tensor(Dtype.FP8x23, keys.shape, to_fp(keys.flatten(), FixedImpl.FP8x23))
values = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
name = "label_encoder_fp8x23_default"
make_test(
inputs = [x, default, keys, values], output = y, func_sig = """input_0.label_encoder(default_list:Option::None, default_tensor: Option::Some(input_1),
keys:Option::None, keys_tensor: Option::Some(input_2),
values: Option::None, values_tensor: Option::Some(input_3))""",
name= name)
default()
label_encoder()
@staticmethod
def label_encoder_i8():
def label_encoder_3D():
def default():
x = np.array([1, 2, 3, 4, 5, 6, 1, 2, 3, 7, 8]).astype(np.int8)
keys = np.array([1, 2, 5, 6, 7]).astype(np.int8)
values = np.array([11, 22, 55, 66, 77]).astype(np.int8)
default = np.array(99).astype(np.int8)
y = labelEncoder(x=x, keys_int64s=keys, values_int64s=values, default_int64=default)
x = Tensor(Dtype.I8, x.shape, x.flatten())
default = Tensor(Dtype.I8, default.shape, default.flatten())
keys = Tensor(Dtype.I8, keys.shape, keys.flatten())
values = Tensor(Dtype.I8, values.shape, values.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "label_encoder_i8_default"
make_test(
                    inputs = [x, default, keys, values], output = y, func_sig = """input_0.label_encoder(default_list:Option::None, default_tensor: Option::Some(input_1),
keys:Option::None, keys_tensor: Option::Some(input_2),
values: Option::None, values_tensor: Option::Some(input_3))""",
name= name)
default()
label_encoder_3D()
@staticmethod
def label_encoder_i32():
def label_encoder_3D():
def default():
x = np.array([1, 2, 3, 4, 5, 6, 1, 2, 3, 7, 8]).astype(np.int32)
keys = np.array([1, 2, 5, 6, 7]).astype(np.int32)
values = np.array([11, 22, 55, 66, 77]).astype(np.int32)
default = np.array(99).astype(np.int32)
y = labelEncoder(x=x, keys_int64s=keys, values_int64s=values, default_int64=default)
x = Tensor(Dtype.I32, x.shape, x.flatten())
default = Tensor(Dtype.I32, default.shape, default.flatten())
keys = Tensor(Dtype.I32, keys.shape, keys.flatten())
values = Tensor(Dtype.I32, values.shape, values.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "label_encoder_i32_default"
make_test(
inputs = [x, default, keys, values], output = y, func_sig = """input_0.label_encoder(default_list:Option::None, default_tensor: Option::Some(input_1),
keys:Option::None, keys_tensor: Option::Some(input_2),
values: Option::None, values_tensor: Option::Some(input_3))""",
name= name)
default()
label_encoder_3D()
@staticmethod
def label_encoder_u32():
def label_encoder_3D():
def default():
x = np.array([1, 2, 3, 4, 5, 6, 1, 2, 3, 7, 8]).astype(np.uint32)
keys = np.array([1, 2, 5, 6, 7]).astype(np.uint32)
                values = np.array([11, 22, 55, 66, 77]).astype(np.uint32)
default = np.array(99).astype(np.uint32)
y = labelEncoder(x=x, keys_int64s=keys, values_int64s=values, default_int64=default)
x = Tensor(Dtype.U32, x.shape, x.flatten())
default = Tensor(Dtype.U32, default.shape, default.flatten())
keys = Tensor(Dtype.U32, keys.shape, keys.flatten())
values = Tensor(Dtype.U32, values.shape, values.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "label_encoder_u32_default"
make_test(
inputs = [x, default, keys, values], output = y, func_sig = """input_0.label_encoder(default_list:Option::None, default_tensor: Option::Some(input_1),
keys:Option::None, keys_tensor: Option::Some(input_2),
values: Option::None, values_tensor: Option::Some(input_3))""",
name= name)
default()
        label_encoder_3D()
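# Hedged note on the fixed-point tensors used above (assumption about the
# to_fp helper): FP16x16 encodes a real value v as round(v * 2**16) and
# FP8x23 as round(v * 2**23). A standalone illustration:
if __name__ == "__main__":
    print(round(1.5 * 2**16))    # 98304      (1.5 in FP16x16)
    print(round(-2.25 * 2**23))  # -18874368  (-2.25 in FP8x23)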
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.node import expect
def _layer_normalization(X, W, B, axis=-1, epsilon=1e-5):
X_shape = X.shape
X_rank = len(X_shape)
if axis < 0:
axis = axis + X_rank
unsqueezed_rank = X_rank - axis
reduction_shape = X_shape[0:axis] + (1,) * unsqueezed_rank
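    # reduction_shape keeps the leading (unnormalized) axes and collapses
    # every normalized axis to 1, so Mean and InvStdDev broadcast back
    # against X.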
row_number = 1
col_number = 1
for i in range(X_rank):
if i < axis:
row_number *= X_shape[i]
else:
col_number *= X_shape[i]
x_mat = np.reshape(X, (row_number, col_number))
x_mean = np.sum(x_mat, axis=1, keepdims=True) / col_number
x_diff = x_mat - x_mean
x_squared_diff = x_diff * x_diff
variance = np.sum(x_squared_diff, axis=1, keepdims=True) / col_number
variance_eps = variance + epsilon
std_dev = np.sqrt(variance_eps)
inv_std_dev = np.reciprocal(std_dev)
y_mat = x_diff * inv_std_dev
Y = np.reshape(y_mat, X_shape) * W + B
X_mean = np.reshape(x_mean, reduction_shape)
X_inv_std_dev = np.reshape(inv_std_dev, reduction_shape)
return Y, X_mean, X_inv_std_dev
def calculate_normalized_shape(X_shape, axis):
X_rank = len(X_shape)
if axis < 0:
axis = axis + X_rank
    return X_shape[axis:]
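# Minimal sanity check for the reference implementation above (illustrative
# only): with W = 1 and B = 0, each normalized slice should end up with
# ~zero mean and ~unit variance.
if __name__ == "__main__":
    _X = np.random.randn(2, 4).astype(np.float32)
    _W = np.ones(4, dtype=np.float32)
    _B = np.zeros(4, dtype=np.float32)
    _Y, _mean, _inv_std = _layer_normalization(_X, _W, _B, axis=-1)
    assert np.allclose(_Y.mean(axis=-1), 0.0, atol=1e-4)
    assert np.allclose(_Y.std(axis=-1), 1.0, atol=1e-2)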
class Layer_normalization(RunAll):
@staticmethod
def export4d() -> None:
X = np.random.randn(2, 3, 4, 5).astype(np.float32)
def case(axis: int) -> None:
normalized_shape = calculate_normalized_shape(X.shape, axis)
W = np.random.randn(*normalized_shape).astype(np.float32)
B = np.random.randn(*normalized_shape).astype(np.float32)
Y, mean, inv_std_dev = _layer_normalization(X, W, B, axis)
if axis < 0:
name = f"layer_normalization_4d_axis_negative_{-axis}"
func_sig = f"input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some({-axis}),Option::None,Option::None)"
else:
name = f"layer_normalization_4d_axis{axis}"
func_sig = f"input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some({axis}),Option::None,Option::None)"
x = Tensor(Dtype.FP8x23, X.shape, to_fp(X.flatten(), FixedImpl.FP8x23))
w = Tensor(Dtype.FP8x23, W.shape, to_fp(W.flatten(), FixedImpl.FP8x23))
b = Tensor(Dtype.FP8x23, B.shape, to_fp(B.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, Y.shape, to_fp(Y.flatten(), FixedImpl.FP8x23))
make_test([x,w,b], y, func_sig, name)
for i in range(len(X.shape)):
case(i)
case(i - len(X.shape))
@staticmethod
def export_default_axis() -> None:
X = np.random.randn(2, 3, 4, 5).astype(np.float32)
normalized_shape = calculate_normalized_shape(X.shape, -1)
W = np.random.randn(*normalized_shape).astype(np.float32)
B = np.random.randn(*normalized_shape).astype(np.float32)
Y, mean, inv_std_dev = _layer_normalization(X, W, B)
x = Tensor(Dtype.FP16x16, X.shape, to_fp(X.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, W.shape, to_fp(W.flatten(), FixedImpl.FP16x16))
b = Tensor(Dtype.FP16x16, B.shape, to_fp(B.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, Y.shape, to_fp(Y.flatten(), FixedImpl.FP16x16))
name = "layer_normalization_default_axis"
make_test([x,w,b], y, "input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::None,Option::None,Option::None)", name)
@staticmethod
def export3d_epsilon() -> None:
epsilon = 1e-1
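        # The Cairo signatures below pass epsilon as FixedTrait::new(6554, false):
        # round(0.1 * 2**16) = 6554, i.e. 0.1 in FP16x16 (assumption about the
        # fixed-point encoding, consistent with the to_fp helpers).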
X = np.random.randn(2, 3, 5).astype(np.float32)
def case(axis: int) -> None:
normalized_shape = calculate_normalized_shape(X.shape, axis)
W = np.random.randn(*normalized_shape).astype(np.float32)
B = np.random.randn(*normalized_shape).astype(np.float32)
Y, mean, inv_std_dev = _layer_normalization(X, W, B, axis, epsilon)
if axis < 0:
name = f"layer_normalization_3d_axis_negative_{-axis}_epsilon"
func_sig = f"input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some({-axis}),Option::Some(FixedTrait::new(6554, false)),Option::None)"
else:
name = f"layer_normalization_3d_axis{axis}_epsilon"
func_sig = f"input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some({axis}),Option::Some(FixedTrait::new(6554, false)),Option::None)"
x = Tensor(Dtype.FP16x16, X.shape, to_fp(X.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, W.shape, to_fp(W.flatten(), FixedImpl.FP16x16))
b = Tensor(Dtype.FP16x16, B.shape, to_fp(B.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, Y.shape, to_fp(Y.flatten(), FixedImpl.FP16x16))
make_test([x,w,b], y, func_sig, name)
for i in range(len(X.shape)):
case(i)
case(i - len(X.shape))
@staticmethod
def test_2d_example() -> None:
X = np.random.randn(3, 4).astype(np.float32)
def case(axis: int) -> None:
normalized_shape = calculate_normalized_shape(X.shape, axis)
            W = np.random.randn(*normalized_shape).astype(np.float32)
B = np.random.randn(*normalized_shape).astype(np.float32)
Y, mean, inv_std_dev = _layer_normalization(X, W, B, axis=axis)
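            # The onnx node below is retained from the upstream ONNX example;
            # make_test further down does not consume it.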
node = onnx.helper.make_node(
"LayerNormalization",
inputs=["X", "W", "B"],
outputs=["Y", "Mean", "InvStdDev"],
axis=axis,
)
x = Tensor(Dtype.FP16x16, X.shape, to_fp(X.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, W.shape, to_fp(W.flatten(), FixedImpl.FP16x16))
b = Tensor(Dtype.FP16x16, B.shape, to_fp(B.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, Y.shape, to_fp(Y.flatten(), FixedImpl.FP16x16))
name = "layer_normalization_test"
make_test([x,w,b], y, "input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::None,Option::None,Option::None)", name)
        case(-1)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
import tensorflow as tf
class Leaky_relu(RunAll):
@staticmethod
def leaky_relu_fp8x23():
x = np.random.uniform(-5, 7, (2, 2)).astype(np.float64)
layer = tf.keras.layers.LeakyReLU(alpha=0.1)
y = layer(x).numpy()
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "leaky_relu_fp8x23"
make_test([x], y, "NNTrait::leaky_relu(@input_0, @FixedTrait::new(838861, false))",
name, Trait.NN)
@staticmethod
def leaky_relu_fp16x16():
x = np.random.uniform(-5, 7, (2, 2)).astype(np.float64)
layer = tf.keras.layers.LeakyReLU(alpha=0.1)
y = layer(x).numpy()
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "leaky_relu_fp16x16"
make_test([x], y, "NNTrait::leaky_relu(@input_0, @FixedTrait::new(6554, false))",
name, Trait.NN)
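# Hedged sketch (illustrative only): the second argument to leaky_relu above
# is alpha = 0.1 in fixed point, and the op itself is elementwise
# max(x, alpha * x). A NumPy equivalent under these assumptions:
if __name__ == "__main__":
    import numpy as np
    x = np.array([-2.0, 3.0])
    alpha = 0.1
    print(np.where(x > 0, x, alpha * x))  # [-0.2  3. ]
    print(round(alpha * 2**23))           # 838861 -> FP8x23 constant above
    print(round(alpha * 2**16))           # 6554   -> FP16x16 constant above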
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Less(RunAll):
@staticmethod
def less_u32():
def default():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
z = np.less(x, y)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "less_u32"
make_test([x, y], z, "input_0.less(@input_1)", name)
def broadcast():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 6, (1, 3, 1)).astype(np.uint32)
z = np.less(x, y)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "less_u32_broadcast"
make_test([x, y], z, "input_0.less(@input_1)", name)
default()
broadcast()
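        # Note: np.less returns booleans; they are flattened into a
        # Dtype.I32 tensor (0/1), the convention used by the comparison tests
        # in this file.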
@staticmethod
def less_i32():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
z = np.less(x, y)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "less_i32"
make_test([x, y], z, "input_0.less(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.int32)
z = np.less(x, y)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "less_i32_broadcast"
make_test([x, y], z, "input_0.less(@input_1)", name)
default()
broadcast()
@staticmethod |
def less_i8():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
z = np.less(x, y)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "less_i8"
make_test([x, y], z, "input_0.less(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.int8)
z = np.less(x, y)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "less_i8_broadcast"
make_test([x, y], z, "input_0.less(@input_1)", name)
default()
broadcast()
@staticmethod
def less_fp8x23():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.less(x, y)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "less_fp8x23"
make_test([x, y], z, "input_0.less(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.float64)
z = np.less(x, y)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "less |
_fp8x23_broadcast"
make_test([x, y], z, "input_0.less(@input_1)", name)
default()
broadcast()
@staticmethod
def less_fp16x16():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.less(x, y)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "less_fp16x16"
make_test([x, y], z, "input_0.less(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.float64)
z = np.less(x, y)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "less_fp16x16_broadcast"
make_test([x, y], z, "input_0.less(@input_1)", name)
default()
        broadcast()
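# Quick illustration of the broadcast cases above (illustrative only): a
# (1, 3, 1) operand stretches against (3, 3, 3) under NumPy broadcasting, so
# the comparison stays elementwise.
if __name__ == "__main__":
    import numpy as np
    a = np.arange(27).reshape(3, 3, 3)
    b = np.array([[[5], [10], [20]]])  # shape (1, 3, 1)
    assert np.less(a, b).shape == (3, 3, 3)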
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Less_equal(RunAll):
@staticmethod
def less_equal_u32():
def default():
x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
y = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
z = np.less_equal(x, y)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "less_equal_u32"
make_test([x, y], z, "input_0.less_equal(@input_1)", name)
def broadcast():
x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
y = np.random.randint(0, 6, (1, 2)).astype(np.uint32)
z = np.less_equal(x, y)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "less_equal_u32_broadcast"
make_test([x, y], z, "input_0.less_equal(@input_1)", name)
default()
broadcast()
@staticmethod
def less_equal_i32():
def default():
x = np.random.randint(-3, 3, (2, 2)).astype(np.int32)
y = np.random.randint(-3, 3, (2, 2)).astype(np.int32)
z = np.less_equal(x, y)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "less_equal_i32"
make_test([x, y], z, "input_0.less_equal(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.int32)
y = np.random.randint(-3, 3, (1, 2)).astype(np.int32)
z = np.less_equal(x, y)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "less_equal_i32_broadcast"
make_test([x, y], z, "input_0.less_equal(@input_1)", na |