@staticmethod
def less_equal_i8():
def default():
x = np.random.randint(-3, 3, (2, 2)).astype(np.int8)
y = np.random.randint(-3, 3, (2, 2)).astype(np.int8)
z = np.less_equal(x, y)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "less_equal_i8"
make_test([x, y], z, "input_0.less_equal(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.int8)
y = np.random.randint(-3, 3, (1, 2)).astype(np.int8)
z = np.less_equal(x, y)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "less_equal_i8_broadcast"
make_test([x, y], z, "input_0.less_equal(@input_1)", name)
default()
broadcast()
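# Fixed-point variants: float inputs are packed with to_fp into the FP8x23 / FP16x16
# representations expected by the Cairo tensors; the boolean result of np.less_equal is
# still serialized as an i32 tensor of 0s and 1s.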
@staticmethod
def less_equal_fp8x23():
def default():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
z = np.less_equal(x, y)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "less_equal_fp8x23"
make_test([x, y], z, "input_0.less_equal(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
z = np.less_equal(x, y)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "less_equal_fp8x23_broadcast"
make_test([x, y], z, "input_0.less_equal(@input_1)", name)
default()
broadcast()
@staticmethod
def less_equal_fp16x16():
def default():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
z = np.less_equal(x, y)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "less_equal_fp16x16"
make_test([x, y], z, "input_0.less_equal(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
z = np.less_equal(x, y)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "less_equal_fp16x16_broadcast"
make_test([x, y], z, "input_0.less_equal(@input_1)", name)
default()
broadcast()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
from typing import Optional
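# NumPy reference used to generate the expected output of NNTrait::linear:
# a single dense layer, y = i . w^T + b (bias optional).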
def linear(
i: np.ndarray,
w: np.ndarray,
b: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.dot(i, w.T) + b
class Linear(RunAll):
@staticmethod
def linear_i32():
i = np.random.randint(-5, 9, (3)).astype(np.int32)
w = np.random.randint(-5, 9, (2, 3)).astype(np.int32)
b = np.random.randint(-5, 9, (2)).astype(np.int32)
y = linear(i, w, b)
i = Tensor(Dtype.I32, i.shape, i.flatten())
w = Tensor(Dtype.I32, w.shape, w.flatten())
b = Tensor(Dtype.I32, b.shape, b.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "linear_i32"
make_test([i, w, b], y, "NNTrait::linear(input_0, input_1, input_2)",
name, Trait.NN)
@staticmethod
def linear_i8():
i = np.random.randint(-3, 3, (3)).astype(np.int8)
w = np.random.randint(-3, 3, (2, 3)).astype(np.int8)
b = np.random.randint(-3, 3, (2)).astype(np.int8)
y = linear(i, w, b)
i = Tensor(Dtype.I8, i.shape, i.flatten())
w = Tensor(Dtype.I8, w.shape, w.flatten())
b = Tensor(Dtype.I8, b.shape, b.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "linear_i8"
make_test([i, w, b], y, "NNTrait::linear(input_0, input_1, input_2)",
name, Trait.NN)
@staticmethod
def linear_u32():
i = np.random.randint(0, 6, (3)).astype(np.uint32)
w = np.random.randint(0, 6, (2, 3)).astype(np.uint32)
b = np.random.randint(0, 6, (2)).astype(np.uint32)
y = linear(i, w, b)
i = Tensor(Dtype.U32, i.shape, i.flatten())
w = Tensor(Dtype.U32, w.shape, w.flatten())
b = Tensor(Dtype.U32, b.shape, b.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "linear_u32"
make_test([i, w, b], y, "NNTrait::linear(input_0, input_1, input_2)",
name, Trait.NN)
@staticmethod
def linear_fp8x23():
i = np.random.uniform(-5, 7, (3)).astype(np.float64)
w = np.random.uniform(-5, 7, (2, 3)).astype(np.float64)
b = np.random.uniform(-5, 7, (2)).astype(np.float64)
y = linear(i, w, b)
i = Tensor(Dtype.FP8x23, i.shape, to_fp(
i.flatten(), FixedImpl.FP8x23))
w = Tensor(Dtype.FP8x23, w.shape, to_fp(
w.flatten(), FixedImpl.FP8x23))
b = Tensor(Dtype.FP8x23, b.shape, to_fp(
b.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "linear_fp8x23"
make_test([i, w, b], y, "NNTrait::linear(input_0, input_1, input_2)",
name, Trait.NN)
@staticmethod
def linear_fp16x16():
i = np.random.uniform(-5, 7, (3)).astype(np.float64)
w = np.random.uniform(-5, 7, (2, 3)).astype(np.float64)
b = np.random.uniform(-5, 7, (2)).astype(np.float64)
y = linear(i, w, b)
i = Tensor(Dtype.FP16x16, i.shape, to_fp(
i.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(
w.flatten(), FixedImpl.FP16x16))
b = Tensor(Dtype.FP16x16, b.shape, to_fp(
b.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "linear_fp16x16"
make_test([i, w, b], y, "NNTrait::linear(input_0, input_1, input_2)",
name, Trait.NN)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Log(RunAll):
@staticmethod
def log_fp8x23():
x = np.random.uniform(1, 127, (2, 2)).astype(np.float64)
y = np.log(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "log_fp8x23"
make_test([x], y, "input_0.log()", name)
@staticmethod
def log_fp16x16():
x = np.random.uniform(1, 127, (2, 2)).astype(np.float64)
y = np.log(x)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "log_fp16x16"
make_test([x], y, "input_0.log()", name)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
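# Numerically stable log-softmax reference for NNTrait::logsoftmax:
# log_softmax(x) = (x - max(x)) - log(sum(exp(x - max(x)))), reduced along `axis`.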
def logsoftmax(x: np.ndarray, axis: int = -1) -> np.ndarray:
x_max = np.max(x, axis=axis, keepdims=True)
tmp = np.exp(x - x_max)
s = np.sum(tmp, axis=axis, keepdims=True)
return (x - x_max) - np.log(s)
class Logsoftmax(RunAll):
@staticmethod
def logsoftmax_fp8x23():
def axis_0():
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
y = logsoftmax(x, 0)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "logsoftmax_fp8x23_axis_0"
make_test([x], y, "NNTrait::logsoftmax(@input_0, 0)",
name, Trait.NN)
def axis_1():
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
y = logsoftmax(x, 1)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "logsoftmax_fp8x23_axis_1"
make_test([x], y, "NNTrait::logsoftmax(@input_0, 1)",
name, Trait.NN)
axis_0()
axis_1()
@staticmethod
def logsoftmax_fp16x16():
def axis_0():
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
y = logsoftmax(x, 0)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "logsoftmax_fp16x16_axis_0"
make_test([x], y, "NNTrait::logsoftmax(@input_0, 0)",
name, Trait.NN)
def axis_1():
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
y = logsoftmax(x, 1)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "logsoftmax_fp16x16_axis_1"
make_test([x], y, "NNTrait::logsoftmax(@input_0, 1)",
name, Trait.NN)
axis_0()
axis_1()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
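# Each dtype below exercises matmul on a 1-D dot product (scalar result reshaped to a
# length-1 tensor), a square 2x2 product, and the rectangular (2,1)x(1,2) and (1,2)x(2,1) cases.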
class Matmul(RunAll):
@staticmethod
def matmul_u32():
def matmul_1D():
a = np.random.randint(0, 255, (3)).astype(np.uint32)
b = np.random.randint(0, 255, (3)).astype(np.uint32)
y = np.matmul(a, b).reshape((1))
a = Tensor(Dtype.U32, a.shape, a.flatten())
b = Tensor(Dtype.U32, b.shape, b.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "matmul_u32_1d"
make_test(
[a, b], y, "input_0.matmul(@input_1)", name)
def matmul_2x2():
a = np.random.randint(0, 255, (2, 2)).astype(np.uint32)
b = np.random.randint(0, 255, (2, 2)).astype(np.uint32)
y = np.matmul(a, b)
a = Tensor(Dtype.U32, a.shape, a.flatten())
b = Tensor(Dtype.U32, b.shape, b.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "matmul_u32_2x2"
make_test(
[a, b], y, "input_0.matmul(@input_1)", name)
def matmul_2x1():
a = np.random.randint(0, 255, (2, 1)).astype(np.uint32)
b = np.random.randint(0, 255, (1, 2)).astype(np.uint32)
y = np.matmul(a, b)
a = Tensor(Dtype.U32, a.shape, a.flatten())
b = Tensor(Dtype.U32, b.shape, b.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "matmul_u32_2x1"
make_test(
[a, b], y, "input_0.matmul(@input_1)", name)
def matmul_1x2():
a = np.random.randint(0, 255, (1, 2)).astype(np.uint32)
b = np.random.randint(0, 255, (2, 1)).astype(np.uint32)
y = np.matmul(a, b)
a = Tensor(Dtype.U32, a.shape, a.flatten())
b = Tensor(Dtype.U32, b.shape, b.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "matmul_u32_1x2"
make_test(
[a, b], y, "input_0.matmul(@input_1)", name)
matmul_1D()
matmul_2x2()
matmul_2x1()
matmul_1x2()
@staticmethod
def matmul_i32():
def matmul_1D():
a = np.random.randint(-127, 127, (3)).astype(np.int32)
b = np.random.randint(-127, 127, (3)).astype(np.int32)
y = np.matmul(a, b).reshape((1))
a = Tensor(Dtype.I32, a.shape, a.flatten())
b = Tensor(Dtype.I32, b.shape, b.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "matmul_i32_1d"
make_test(
[a, b], y, "input_0.matmul(@input_1)", name)
def matmul_2x2():
a = np.random.randint(-127, 127, (2, 2)).astype(np.int32)
b = np.random.randint(-127, 127, (2, 2)).astype(np.int32)
y = np.matmul(a, b)
a = Tensor(Dtype.I32, a.shape, a.flatten())
b = Tensor(Dtype.I32, b.shape, b.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "matmul_i32_2x2"
make_test(
[a, b], y, "input_0.matmul(@input_1)", name)
def matmul_2x1():
a = np.random.randint(-127, 127, (2, 1)).astype(np.int32)
b = np.random.randint(-127, 127, (1, 2)).astype(np.int32)
y = np.matmul(a, b)
a = Tensor(Dtype.I32, a.shape, a.flatten())
b = Tensor(Dtype.I32, b.shape, b.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "matmul_i32_2x1"
make_test(
[a, b], y, "input_0.matmul(@input_1)", name)
def matmul_1x2():
a = np.random.randint(-127, 127, (1, 2)).astype(np.int32)
b = np.random.randint(-127, 127, (2, 1)).astype(np.int32)
y = np.matmul(a, b)
a = Tensor(Dtype.I32, a.shape, a.flatten())
b = Tensor(Dtype.I32, b.shape, b.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "matmul_i32_1x2"
make_test(
[a, b], y, "input_0.matmul(@input_1)", name)
matmul_1D()
matmul_2x2()
matmul_2x1()
matmul_1x2()
@staticmethod
def matmul_i8():
def matmul_1D():
a = np.random.randint(-4, 5, (3)).astype(np.int8)
b = np.random.randint(-4, 5, (3)).astype(np.int8)
y = np.matmul(a, b).reshape((1))
a = Tensor(Dtype.I8, a.shape, a.flatten())
b = Tensor(Dtype.I8, b.shape, b.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "matmul_i8_1d"
make_test(
[a, b], y, "input_0.matmul(@input_1)", name)
def matmul_2x2():
a = np.random.randint(-4, 5, (2, 2)).astype(np.int8)
b = np.random.randint(-4, 5, (2, 2)).astype(np.int8)
y = np.matmul(a, b)
a = Tensor(Dtype.I8, a.shape, a.flatten())
b = Tensor(Dtype.I8, b.shape, b.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "matmul_i8_2x2"
make_test(
[a, b], y, "input_0.matmul(@input_1)", name)
def matmul_2x1():
a = np.random.randint(-4, 5, (2, 1)).astype(np.int8)
b = np.random.randint(-4, 5, (1, 2)).astype(np.int8)
y = np.matmul(a, b)
a = Tensor(Dtype.I8, a.shape, a.flatten())
b = Tensor(Dtype.I8, b.shape, b.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "matmul_i8_2x1"
make_test(
[a, b], y, "input_0.matmul(@input_1)", name)
def matmul_1x2():
a = np.random.randint(-4, 5, (1, 2)).astype(np.int8)
b = np.random.randint(-4, 5, (2, 1)).astype(np.int8)
y = np.matmul(a, b)
a = Tensor(Dtype.I8, a.shape, a.flatten())
b = Tensor(Dtype.I8, b.shape, b.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "matmul_i8_1x2"
make_test(
[a, b], y, "input_0.matmul(@input_1)", name)
matmul_1D()
matmul_2x2()
matmul_2x1()
matmul_1x2()
@staticmethod
def matmul_fp8x23():
def matmul_1D():
a = np.random.randint(-3, 4, (3)).astype(np.int64)
b = np.random.randint(-3, 4, (3)).astype(np.int64)
y = np.matmul(a, b).reshape((1))
a = Tensor(Dtype.FP8x23, a.shape, to_fp(
a.flatten(), FixedImpl.FP8x23))
b = Tensor(Dtype.FP8x23, b.shape, to_fp(
b.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "matmul_fp8x23_1d"
make_test(
[a, b], y, "input_0.matmul(@input_1)", name)
def matmul_2x2():
a = np.random.randint(-3, 4, (2, 2)).astype(np.int64)
b = np.random.randint(-3, 4, (2, 2)).astype(np.int64)
y = np.matmul(a, b)
a = Tensor(Dtype.FP8x23, a.shape, to_fp(
a.flatten(), FixedImpl.FP8x23))
b = Tensor(Dtype.FP8x23, b.shape, to_fp(
b.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "matmul_fp8x23_2x2"
make_test(
[a, b], y, "input_0.matmul(@input_1)", name)
def matmul_2x1():
a = np.random.randint(-3, 4, (2, 1)).astype(np.int64)
b = np.random.randint(-3, 4, (1, 2)).astype(np.int64)
y = np.matmul(a, b)
a = Tensor(Dtype.FP8x23, a.shape, to_fp(
a.flatten(), FixedImpl.FP8x23))
b = Tensor(Dtype.FP8x23, b.shape, to_fp(
b.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "matmul_fp8x23_2x1"
make_test(
[a, b], y, "input_0.matmul(@input_1)", name)
def matmul_1x2():
a = np.random.randint(-3, 4, (1, 2)).astype(np.int64)
b = np.random.randint(-3, 4, (2, 1)).astype(np.int64)
y = np.matmul(a, b)
a = Tensor(Dtype.FP8x23, a.shape, to_fp(
a.flatten(), FixedImpl.FP8x23))
b = Tensor(Dtype.FP8x23, b.shape, to_fp(
b.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "matmul_fp8x23_1x2"
make_test(
[a, b], y, "input_0.matmul(@input_1)", name)
matmul_1D()
matmul_2x2()
matmul_2x1()
matmul_1x2()
@staticmethod
def matmul_fp16x16():
def matmul_1D():
a = np.random.randint(-3, 4, (3)).astype(np.int64)
b = np.random.randint(-3, 4, (3)).astype(np.int64)
y = np.matmul(a, b).reshape((1))
a = Tensor(Dtype.FP16x16, a.shape, to_fp(
a.flatten(), FixedImpl.FP16x16))
b = Tensor(Dtype.FP16x16, b.shape, to_fp(
b.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "matmul_fp16x16_1d"
make_test(
[a, b], y, "input_0.matmul(@input_1)", name)
def matmul_2x2():
a = np.random.randint(-3, 4, (2, 2)).astype(np.int64)
b = np.random.randint(-3, 4, (2, 2)).astype(np.int64)
y = np.matmul(a, b)
a = Tensor(Dtype.FP16x16, a.shape, to_fp(
a.flatten(), FixedImpl.FP16x16))
b = Tensor(Dtype.FP16x16, b.shape, to_fp(
b.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "matmul_fp16x16_2x2"
make_test(
[a, b], y, "input_0.matmul(@input_1)", name)
def matmul_2x1():
a = np.random.randint(-3, 4, (2, 1)).astype(np.int64)
b = np.random.randint(-3, 4, (1, 2)).astype(np.int64)
y = np.matmul(a, b)
a = Tensor(Dtype.FP16x16, a.shape, to_fp(
a.flatten(), FixedImpl.FP16x16))
b = Tensor(Dtype.FP16x16, b.shape, to_fp(
b.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "matmul_fp16x16_2x1"
make_test(
[a, b], y, "input_0.matmul(@input_1)", name)
def matmul_1x2():
a = np.random.randint(-3, 4, (1, 2)).astype(np.int64)
b = np.random.randint(-3, 4, (2, 1)).astype(np.int64)
y = np.matmul(a, b)
a = Tensor(Dtype.FP16x16, a.shape, to_fp(
a.flatten(), FixedImpl.FP16x16))
b = Tensor(Dtype.FP16x16, b.shape, to_fp(
b.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "matmul_fp16x16_1x2"
make_test(
[a, b], y, "input_0.matmul(@input_1)", name)
matmul_1D()
matmul_2x2()
matmul_2x1()
matmul_1x2()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
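# TensorTrait::max reduces a span of tensors element-wise; the NumPy reference chains
# np.maximum over the inputs. Broadcast variants mix shapes such as (2,2), (1,2) and (1,1).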
class Max(RunAll):
@staticmethod
def max_u32_two_tensors():
def default():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
z = np.maximum(x, y)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "max_u32_two_tensors"
make_test([x, y], z, "TensorTrait::max(array![input_0, input_1].span())", name)
def broadcast():
x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
y = np.random.randint(0, 6, (1, 2)).astype(np.uint32)
z = np.maximum(x, y)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "max_u32_broadcast_two_tensors"
make_test([x, y], z, "TensorTrait::max(array![input_0, input_1].span())", name)
default()
broadcast()
@staticmethod
def max_i32_two_tensors():
def default():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
z = np.maximum(x, y)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "max_i32_two_tensors"
make_test([x, y], z, "TensorTrait::max(array![input_0, input_1].span())", name)
def broadcast():
x = np.random.randint(0, 6, (2, 2)).astype(np.int32)
y = np.random.randint(0, 6, (1, 2)).astype(np.int32)
z = np.maximum(x, y)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "max_i32_broadca |
st_two_tensors"
make_test([x, y], z, "TensorTrait::max(array![input_0, input_1].span())", name)
default()
broadcast()
@staticmethod
def max_i8_two_tensors():
def default():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
z = np.maximum(x, y)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I8, z.shape, z.flatten())
name = "max_i8_two_tensors"
make_test([x, y], z, "TensorTrait::max(array![input_0, input_1].span())", name)
def broadcast():
x = np.random.randint(0, 6, (2, 2)).astype(np.int8)
y = np.random.randint(0, 6, (1, 2)).astype(np.int8)
z = np.maximum(x, y)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I8, z.shape, z.flatten())
name = "max_i8_broadcast_two_tensors"
make_test([x, y], z, "TensorTrait::max(array![input_0, input_1].span())", name)
default()
broadcast()
@staticmethod
def max_fp8x23_two_tensors():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.maximum(x, y)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
name = "max_fp8x23_two_tensors"
make_test([x, y], z, "TensorTrait::max(array![input_0, input_1].span())", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
z = np.maximum(x, y)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
name = "max_fp8x23_broadcast_two_tensors"
make_test([x, y], z, "TensorTrait::max(array![input_0, input_1].span())", name)
default()
broadcast()
@staticmethod
def max_fp16x16_two_tensors():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.maximum(x, y)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
name = "max_fp16x16_two_tensors"
make_test([x, y], z, "TensorTrait::max(array![input_0, input_1].span())", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
z = np.maximum(x, y)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
name = "max_fp16x16_broadcast_two_tensors"
make_test([x, y], z, "TensorTrait::max(array![input_0, input_1].span())", name)
default()
broadcast()
@staticmethod
def max_u32_three_tensors():
def default():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
z = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
m = np.maximum(np.maximum(x, y), z)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
m = Tensor(Dtype.U32, m.shape, m.flatten())
name = "max_u32_three_tensors"
make_test([x, y, z], m, "TensorTrait::max(array![input_0, input_1, input_2].span())", name)
def broadcast():
x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
y = np.random.randint(0, 6, (1, 2)).astype(np.uint32)
z = np.random.randint(0, 6, (1, 1)).astype(np.uint32)
m = np.maximum(np.maximum(x, y), z)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
m = Tensor(Dtype.U32, m.shape, m.flatten())
name = "max_u32_broadcast_three_tensors"
make_test([x, y, z], m, "TensorTrait::max(array![input_0, input_1, input_2].span())", name)
default()
broadcast()
@staticmethod
def max_i32_three_tensors():
def default():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
z = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
m = np.maximum(np.maximum(x, y), z)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
m = Tensor(Dtype.I32, m.shape, m.flatten())
name = "max_i32_three_tensors"
make_test([x, y, z], m, "TensorTrait::max(array![input_0, input_1, input_2].span())", name)
def broadcast():
x = np.random.randint(0, 6, (2, 2)).astype(np.int32)
y = np.random.randint(0, 6, (1, 2)).astype(np.int32)
z = np.random.randint(0, 6, (1, 1)).astype(np.int32)
m = np.maximum(np.maximum(x, y), z)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
m = Tensor(Dtype.I32, m.shape, m.flatten())
name = "max_i32_broadcast_three_tensors"
make_test([x, y, z], m, "TensorTrait::max(array![input_0, input_1, input_2].span())", name)
default()
broadcast()
@staticmethod
def max_i8_three_tensors():
def default():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
z = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
m = np.maximum(np.maximum(x, y), z)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I8, z.shape, z.flatten())
m = Tensor(Dtype.I8, m.shape, m.flatten())
name = "max_i8_three_tensors"
make_test([x, y, z], m, "TensorTrait::max(array![input_0, input_1, input_2].span())", name)
def broadcast():
x = np.random.randint(0, 6, (2, 2)).astype(np.int8)
y = np.random.randint(0, 6, (1, 2)).astype(np.int8)
z = np.random.randint(0, 6, (1, 1)).astype(np.int8)
m = np.maximum(np.maximum(x, y), z)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I8, z.shape, z.flatten())
m = Tensor(Dtype.I8, m.shape, m.flatten())
name = "max_i8_broadcast_three_tensors"
make_test([x, y, z], m, "TensorTrait::max(array![input_0, input_1, input_2].span())", name)
default()
broadcast()
@staticmethod
def max_fp8x23_three_tensors():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
m = np.maximum(np.maximum(x, y), z)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
m = Tensor(Dtype.FP8x23, m.shape, to_fp(
m.flatten(), FixedImpl.FP8x23))
name = "max_fp8x23_three_tensors"
make_test([x, y, z], m, "TensorTrait::max(array![input_0, input_1, input_2].span())", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
z = np.random.randint(-3, 3, (1, 1)).astype(np.float64)
m = np.maximum(np.maximum(x, y), z)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
m = Tensor(Dtype.FP8x23, m.shape, to_fp(
m.flatten(), FixedImpl.FP8x23))
name = "max_fp8x23_broadcast_three_tensors"
make_test([x, y, z], m, "TensorTrait::max(array![input_0, input_1, input_2].span())", name)
default()
broadcast()
@staticmethod
def max_fp16x16_three_tensors():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
m = np.maximum(np.maximum(x, y), z)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
m = Tensor(Dtype.FP16x16, m.shape, to_fp(
m.flatten(), FixedImpl.FP16x16))
name = "max_fp16x16_three_tensors"
make_test([x, y, z], m, "TensorTrait::max(array![input_0, input_1, input_2].span())", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
z = np.random.randint(-3, 3, (1, 1)).astype(np.float64)
m = np.maximum(np.maximum(x, y), z)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
m = Tensor(Dtype.FP16x16, m.shape, to_fp(
m.flatten(), FixedImpl.FP16x16))
name = "max_fp16x16_broadcast_three_tensors"
make_test([x, y, z], m, "TensorTrait::max(array![input_0, input_1, input_2].span())", name)
default()
broadcast()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
class Min(RunAll):
@staticmethod
def min_u32_two_tensors():
def default():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
z = np.minimum(x, y)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "min_u32_two_tensors"
make_test([x, y], z, "TensorTrait::min(array![input_0, input_1].span())", name)
def broadcast():
x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
y = np.random.randint(0, 6, (1, 2)).astype(np.uint32)
z = np.minimum(x, y)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "min_u32_broadcast_two_tensors"
make_test([x, y], z, "TensorTrait::min(array![input_0, input_1].span())", name)
default()
broadcast()
@staticmethod
def min_i32_two_tensors():
def default():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
z = np.minimum(x, y)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "min_i32_two_tensors"
make_test([x, y], z, "TensorTrait::min(array![input_0, input_1].span())", name)
def broadcast():
x = np.random.randint(0, 6, (2, 2)).astype(np.int32)
y = np.random.randint(0, 6, (1, 2)).astype(np.int32)
z = np.minimum(x, y)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "min_i32_broadca |
st_two_tensors"
make_test([x, y], z, "TensorTrait::min(array![input_0, input_1].span())", name)
default()
broadcast()
@staticmethod
def min_i8_two_tensors():
def default():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
z = np.minimum(x, y)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I8, z.shape, z.flatten())
name = "min_i8_two_tensors"
make_test([x, y], z, "TensorTrait::min(array![input_0, input_1].span())", name)
def broadcast():
x = np.random.randint(0, 6, (2, 2)).astype(np.int8)
y = np.random.randint(0, 6, (1, 2)).astype(np.int8)
z = np.minimum(x, y)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I8, z.shape, z.flatten())
name = "min_i8_broadcast_two_tensors"
make_test([x, y], z, "TensorTrait::min(array![input_0, input_1].span())", name)
default()
broadcast()
@staticmethod
def min_fp8x23_two_tensors():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.minimum(x, y)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
name = "min_fp8x23_two_tensors"
make_test([x, y], z, "TensorTrait::min(array![input_0, input_1].span())", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
z = np.minimum(x, y)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
name = "min_fp8x23_broadcast_two_tensors"
make_test([x, y], z, "TensorTrait::min(array![input_0, input_1].span())", name)
default()
broadcast()
@staticmethod
def min_fp16x16_two_tensors():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.minimum(x, y)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
name = "min_fp16x16_two_tensors"
make_test([x, y], z, "TensorTrait::min(array![input_0, input_1].span())", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
z = np.minimum(x, y)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
name = "min_fp16x16_broadcast_two_tensors"
make_test([x, y], z, "TensorTrait::min(array![input_0, input_1].span())", name)
default()
broadcast()
@staticmethod
def min_u32_three_tensors():
def default():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
z = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
m = np.minimum(np.minimum(x, y), z)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
m = Tensor(Dtype.U32, m.shape, m.flatten())
name = "min_u32_three_tensors"
make_test([x, y, z], m, "TensorTrait::min(array![input_0, input_1, input_2].span())", name)
def broadcast():
x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
y = np.random.randint(0, 6, (1, 2)).astype(np.uint32)
z = np.random.randint(0, 6, (1, 1)).astype(np.uint32)
m = np.minimum(np.minimum(x, y), z)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
m = Tensor(Dtype.U32, m.shape, m.flatten())
name = "min_u32_broadcast_three_tensors"
make_test([x, y, z], m, "TensorTrait::min(array![input_0, input_1, input_2].span())", name)
default()
broadcast()
@staticmethod
def min_i32_three_tensors():
def default():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
z = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
m = np.minimum(np.minimum(x, y), z)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
m = Tensor(Dtype.I32, m.shape, m.flatten())
name = "min_i32_three_tensors"
make_test([x, y, z], m, "TensorTrait::min(array![input_0, input_1, input_2].span())", name)
def broadcast():
x = np.random.randint(0, 6, (2, 2)).astype(np.int32)
y = np.random.randint(0, 6, (1, 2)).astype(np.int32)
z = np.random.randint(0, 6, (1, 1)).astype(np.int32)
m = np.minimum(np.minimum(x, y), z)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
m = Tensor(Dtype.I32, m.shape, m.flatten())
name = "min_i32_broadcast_three_tensors"
make_test([x, y, z], m, "TensorTrait::min(array![input_0, input_1, input_2].span())", name)
default()
broadcast()
@staticmethod
def min_i8_three_tensors():
def default():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
z = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
m = np.minimum(np.minimum(x, y), z)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I8, z.shape, z.flatten())
m = Tensor(Dtype.I8, m.shape, m.flatten())
name = "min_i8_three_tensors"
make_test([x, y, z], m, "TensorTrait::min(array![input_0, input_1, input_2].span())", name)
def broadcast():
x = np.random.randint(0, 6, (2, 2)).astype(np.int8)
y = np.random.randint(0, 6, (1, 2)).astype(np.int8)
z = np.random.randint(0, 6, (1, 1)).astype(np.int8)
m = np.minimum(np.minimum(x, y), z)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I8, z.shape, z.flatten())
m = Tensor(Dtype.I8, m.shape, m.flatten())
name = "min_i8_broadcast_three_tensors"
make_test([x, y, z], m, "TensorTrait::min(array![input_0, input_1, input_2].span())", name)
default()
broadcast()
@staticmethod
def min_fp8x23_three_tensors():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
m = np.minimum(np.minimum(x, y), z)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
m = Tensor(Dtype.FP8x23, m.shape, to_fp(
m.flatten(), FixedImpl.FP8x23))
name = "min_fp8x23_three_tensors"
make_test([x, y, z], m, "TensorTrait::min(array![input_0, input_1, input_2].span())", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
z = np.random.randint(-3, 3, (1, 1)).astype(np.float64)
m = np.minimum(np.minimum(x, y), z)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
m = Tensor(Dtype.FP8x23, m.shape, to_fp(
m.flatten(), FixedImpl.FP8x23))
name = "min_fp8x23_broadcast_three_tensors"
make_test([x, y, z], m, "TensorTrait::min(array![input_0, input_1, input_2].span())", name)
default()
broadcast()
@staticmethod
def min_fp16x16_three_tensors():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
m = np.minimum(np.minimum(x, y), z)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
m = Tensor(Dtype.FP16x16, m.shape, to_fp(
m.flatten(), FixedImpl.FP16x16))
name = "min_fp16x16_three_tensors"
make_test([x, y, z], m, "TensorTrait::min(array![input_0, input_1, input_2].span())", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
z = np.random.randint(-3, 3, (1, 1)).astype(np.float64)
m = np.minimum(np.minimum(x, y), z)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
m = Tensor(Dtype.FP16x16, m.shape, to_fp(
m.flatten(), FixedImpl.FP16x16))
name = "min_fp16x16_broadcast_three_tensors"
make_test([x, y, z], m, "TensorTrait::min(array![input_0, input_1, input_2].span())", name)
default()
broadcast()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Mul(RunAll):
@staticmethod
def mul_u32():
def default():
x = np.random.randint(3, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 3, (3, 3, 3)).astype(np.uint32)
z = x * y
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "mul_u32"
make_test([x, y], z, "input_0 * input_1", name)
def broadcast():
x = np.random.randint(3, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 3, (1, 3, 1)).astype(np.uint32)
z = x * y
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "mul_u32_broadcast"
make_test([x, y], z, "input_0 * input_1", name)
default()
broadcast()
@staticmethod
def mul_i32():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
z = x * y
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "mul_i32"
make_test([x, y], z, "input_0 * input_1", name)
def broadcast():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.int32)
z = x * y
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "mul_i32_broadcast"
make_test([x, y], z, "input_0 * input_1", name)
default()
broadcast()
@staticmethod
def mul_i8():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
z = x * y
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I8, z.shape, z.flatten())
name = "mul_i8"
make_test([x, y], z, "input_0 * input_1", name)
def broadcast():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.int8)
z = x * y
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I8, z.shape, z.flatten())
name = "mul_i8_broadcast"
make_test([x, y], z, "input_0 * input_1", name)
default()
broadcast()
@staticmethod
def mul_fp8x23():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = x * y
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
name = "mul_fp8x23"
make_test([x, y], z, "input_0 * input_1", name)
def broadcast():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.float64)
z = x * y
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
name = "mul_fp8x23_broadcast" |
make_test([x, y], z, "input_0 * input_1", name)
default()
broadcast()
@staticmethod
def mul_fp16x16():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = x * y
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
name = "mul_fp16x16"
make_test([x, y], z, "input_0 * input_1", name)
def broadcast():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.float64)
z = x * y
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
name = "mul_fp16x16_broadcast"
make_test([x, y], z, "input_0 * input_1", name)
default()
broadcast()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Neg(RunAll):
@staticmethod
def neg_i32():
x = np.random.randint(-127, 127, (2, 2)).astype(np.int32)
y = np.negative(x)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "neg_i32"
make_test([x], y, "input_0.neg()", name)
@staticmethod
def neg_i8():
x = np.random.randint(-127, 127, (2, 2)).astype(np.int8)
y = np.negative(x)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "neg_i8"
make_test([x], y, "input_0.neg()", name)
@staticmethod
def neg_fp8x23():
x = to_fp(np.random.randint(-127, 127, (2, 2)
).astype(np.int64), FixedImpl.FP8x23)
y = np.negative(x)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "neg_fp8x23"
make_test([x], y, "input_0.neg()", name)
@staticmethod
def neg_fp16x16():
x = to_fp(np.random.randint(-127, 127, (2, 2)
).astype(np.int64), FixedImpl.FP16x16)
y = np.negative(x)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "neg_fp16x16"
make_test([x], y, "input_0.neg()", name)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
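# np.nonzero returns one index array per dimension; the expected output is that
# (rank, count) index matrix flattened and serialized as a U32 tensor.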
class Nonzero(RunAll):
@staticmethod
def nonzero_u32():
def nonzero_2D():
x = np.random.randint(0, 255, (2, 4)).astype(np.uint32)
y = np.array(np.nonzero(x), dtype=np.int64)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "nonzero_u32_2d"
make_test(
[x], y, "input_0.nonzero()", name)
def nonzero_3D():
x = np.random.randint(0, 255, (20, 10, 5)).astype(np.uint32)
y = np.array(np.nonzero(x), dtype=np.int64)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "nonzero_u32_3d"
make_test(
[x], y, "input_0.nonzero()", name)
nonzero_2D()
nonzero_3D()
@staticmethod
def nonzero_i32():
def nonzero_2D():
x = np.random.randint(-127, 127, (2, 4)).astype(np.int32)
y = np.array(np.nonzero(x), dtype=np.int64)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "nonzero_i32_2d"
make_test(
[x], y, "input_0.nonzero()", name)
def nonzero_3D():
x = np.random.randint(-127, 127, (20, 10, 5)).astype(np.int32)
y = np.array(np.nonzero(x), dtype=np.int64)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "nonzero_i32_3d"
make_test(
[x], y, "input_0.nonzero()", name)
nonzero_2D()
nonzero_3D()
@staticmethod
def nonzero_i8():
def nonzero_2D():
x = np.random.randint(-127, 127, (2, 4)).astype(np.int8)
y = np.array(np.nonzero(x), dtype=np.int64)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "nonzero_i8_2d" |
make_test(
[x], y, "input_0.nonzero()", name)
def nonzero_3D():
x = np.random.randint(-127, 127, (20, 10, 5)).astype(np.int8)
y = np.array(np.nonzero(x), dtype=np.int64)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "nonzero_i8_3d"
make_test(
[x], y, "input_0.nonzero()", name)
nonzero_2D()
nonzero_3D()
@staticmethod
def nonzero_fp8x23():
def nonzero_2D():
x = to_fp(np.random.randint(-127, 127, (2, 4)
).astype(np.int64), FixedImpl.FP8x23)
y = np.array(np.nonzero(x), dtype=np.int64)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "nonzero_fp8x23_2d"
make_test(
[x], y, "input_0.nonzero()", name)
def nonzero_3D():
x = to_fp(np.random.randint(-127, 127, (20, 10, 5)
).astype(np.int64), FixedImpl.FP8x23)
y = np.array(np.nonzero(x), dtype=np.int64)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "nonzero_fp8x23_3d"
make_test(
[x], y, "input_0.nonzero()", name)
nonzero_2D()
nonzero_3D()
@staticmethod
def nonzero_fp16x16():
def nonzero_2D():
x = to_fp(np.random.randint(-127, 127, (2, 4)
).astype(np.int64), FixedImpl.FP16x16)
y = np.array(np.nonzero(x), dtype=np.int64)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "nonzero_fp16x16_2d"
make_test(
[x], y, "input_0.nonzero()", name)
def nonzero_3D():
x = to_fp(np.random.randint(-127, 127, (20, 10, 5)
).astype(np.int64), FixedImpl.FP16x16)
y = np.array(np.nonzero(x), dtype=np.int64)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "nonzero_fp16x16_3d"
make_test(
[x], y, "input_0.nonzero()", name)
nonzero_2D()
nonzero_3D()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_node, make_test, Tensor, Dtype
class Not(RunAll):
@staticmethod
def not_bool():
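# Note: np.random.uniform(True, False, ...) draws floats in (0, 1], so after the bool cast
# the input here is effectively all True; presumably a placeholder for a real boolean sample.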
x = np.random.uniform(True, False, (1, 1)).astype(bool)
y = ~(x)
x = Tensor(Dtype.Bool, x.shape, x.flatten())
y = Tensor(Dtype.Bool, y.shape, y.flatten())
name = "not_bool"
make_node([x], [y], name)
make_test([x], y, "input_0", name)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
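# Logical OR treats any non-zero element as true; the result is a tensor of 0/1 values
# serialized as U32 regardless of the input dtype.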
class Or(RunAll):
@staticmethod
def or_u32():
def default():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
z = np.logical_or(x, y)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "or_u32"
make_test([x, y], z, "input_0.or(@input_1)", name)
def broadcast():
x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
y = np.random.randint(0, 6, (1, 2)).astype(np.uint32)
z = np.logical_or(x, y)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "or_u32_broadcast"
make_test([x, y], z, "input_0.or(@input_1)", name)
default()
broadcast()
@staticmethod
def or_i32():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
z = np.logical_or(x, y)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "or_i32"
make_test([x, y], z, "input_0.or(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.int32)
y = np.random.randint(-3, 3, (1, 2)).astype(np.int32)
z = np.logical_or(x, y)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "or_i32_broadcast"
make_test([x, y], z, "input_0.or(@input_1)", name)
default()
broadcast()
@staticmethod
def or_i8():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
z = np.logical_or(x, y)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "or_i8"
make_test([x, y], z, "input_0.or(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.int8)
y = np.random.randint(-3, 3, (1, 2)).astype(np.int8)
z = np.logical_or(x, y)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "or_i8_broadcast"
make_test([x, y], z, "input_0.or(@input_1)", name)
default()
broadcast()
@staticmethod
def or_fp8x23():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.logical_or(x, y)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "or_fp8x23"
make_test([x, y], z, "input_0.or(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
z = np.logical_or(x, y)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "or_fp8x23_broadcas |
t"
make_test([x, y], z, "input_0.or(@input_1)", name)
default()
broadcast()
@staticmethod
def or_fp16x16():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.logical_or(x, y)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "or_fp16x16"
make_test([x, y], z, "input_0.or(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
z = np.logical_or(x, y)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "or_fp16x16_broadcast"
make_test([x, y], z, "input_0.or(@input_1)", name)
default()
broadcast()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
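# Element-wise power computed with Python's built-in pow on NumPy arrays (equivalent to
# np.power); the broadcast case raises every element to a single scalar exponent.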
class Pow(RunAll):
@staticmethod
def pow_fp8x23():
def default():
x = np.array([1, 2, 3]).astype(np.float64)
y = np.array([1, 2, 3]).astype(np.float64)
z = np.array(pow(x, y), dtype=np.float64)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
name = "pow_fp8x23"
make_test([x, y], z, "input_0.pow(@input_1)", name)
def broadcast():
x = np.array([1, 2, 3]).astype(np.float64)
y = np.array([2]).astype(np.float64)
z = np.array(pow(x, y), dtype=np.float64)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
name = "pow_fp8x23_broadcast"
make_test([x, y], z, "input_0.pow(@input_1)", name)
default()
broadcast()
@staticmethod
def pow_fp16x16():
def default():
x = np.array([1, 2, 3]).astype(np.float64)
y = np.array([1, 2, 3]).astype(np.float64)
z = np.array(pow(x, y), dtype=np.float64)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
name = "pow_fp16x16"
make_test([x, y], z, "input_0.pow(@input_1)", name)
def broadcast():
x = np.array([1, 2, 3]).astype(np.float64)
y = np.array([2]).astype(np.float64)
z = np.array(pow(x, y), dtype=np.float64)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
name = "pow_fp16x16_broadcast"
make_test([x, y], z, "input_0.pow(@input_1)", name)
default()
broadcast()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
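# Reference generator mirroring ONNX RandomUniformLike: draws uniform values in [low, high)
# with the shape of x, using a seeded RandomState so the expected output stays reproducible.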
def random_uniform_like(x: np.ndarray, high: int = 1, low: int = 0, seed: int = 25) -> tuple:
dtype = np.float64
if seed is None or np.isnan(seed):
state = np.random.RandomState()
else:
state = np.random.RandomState(seed=int(seed))
res = state.rand(*x.shape).astype(dtype)
res *= high - low
res += low
return (res.astype(dtype),)
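# Serializes scalar arguments into Cairo Option::Some(...) literals: fixed-point dtypes become
# FP8x23 / FP16x16 structs with a magnitude and sign flag, while U32 values are emitted directly.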
def get_data_statement(data: np.ndarray, dtype: Dtype) -> list[str]:
match dtype:
case Dtype.FP8x23:
return ["Option::Some(FP8x23 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"})" for x in data.flatten()]
case Dtype.FP16x16:
return ["Option::Some(FP16x16 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"})" for x in data.flatten()]
case Dtype.U32:
return [f"Option::Some({int(x)})" for x in data.flatten()] |
class Random_uniform_like(RunAll):
@staticmethod
def fp8x23():
x = np.random.uniform(1, 10, (1, 2, 2, 4)).astype(np.float64)
y = random_uniform_like(x)
args = [10, 1]
args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP8x23), Dtype.FP8x23)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y[0].shape, to_fp(
y[0].flatten(), FixedImpl.FP8x23))
name = "random_uniform_like_fp8x23"
make_test(
[x],
y,
f"TensorTrait::random_uniform_like(@input_0, {','.join(args_str)}, Option::Some(354145))",
name
)
@staticmethod
def fp16x16():
x = np.random.uniform(1, 10, (1, 2, 2, 4)).astype(np.float16)
y = random_uniform_like(x)
args = [10, 1]
args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP16x16), Dtype.FP16x16)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y[0].shape, to_fp(
y[0].flatten(), FixedImpl.FP16x16))
name = "random_uniform_like_fp16x16"
make_test(
[x],
y,
f"TensorTrait::random_uniform_like(@input_0, {','.join(args_str)}, Option::Some(354145))",
name
)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait, get_data_statement
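# Each range test builds (start, end, step) arguments, serializes them with get_data_statement,
# and uses np.arange(*args) as the reference output tensor.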
class Range(RunAll):
@staticmethod
def fp8x23():
args = [1, 5, 0.3]
args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP8x23), Dtype.FP8x23)
y = np.arange(*args)
print(y)
y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
name = "range_fp8x23"
make_test(
[],
y,
f"TensorTrait::range({','.join(args_str)})",
name,
)
@staticmethod
def fp16x16():
args = [1, 25, 3]
args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP16x16), Dtype.FP16x16)
y = np.arange(*args)
print(y)
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "range_fp16x16"
make_test(
[],
y,
f"TensorTrait::range({','.join(args_str)})",
name,
)
@staticmethod
def i8():
args = [-1, 25, 3]
args_str = get_data_statement(np.array(args).flatten(), Dtype.I8)
y = np.arange(*args)
print(y)
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "range_i8"
make_test(
[],
y,
f"TensorTrait::range({','.join(args_str)})",
name,
)
@staticmethod
def i32():
args = [21, 2, -3]
args_str = get_data_statement(np.array(args).flatten(), Dtype.I32)
y = np.arange(*args)
print(y)
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "range_i32"
make_test(
[],
y,
f"TensorTrait::range({','.join(args_str)})",
name,
)
@staticmethod
def u32():
args = [1, 25, 3]
args_str = get_data_statement(np.array(args).flatten(), Dtype.U32)
y = np.arange(*args)
print(y)
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "range_u32"
make_test(
[],
y,
f"TensorTrait::range({','.join(args_str)})",
name,
) |
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
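# Test generators for reduce_l1: the expected output is the sum of absolute
# values along the selected axis (np.sum(np.abs(x), axis=...)), with and
# without keepdims, for each supported dtype.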
class Reduce_l1(RunAll):
@staticmethod
def reduce_l1_fp8x23():
def reduce_l1_export_do_not_keepdims():
shape = [3, 2, 2]
axes = np.array([2], dtype=np.int64)
keepdims = False
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=False).astype(np.int64)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "reduce_l1_fp8x23_export_do_not_keepdims"
make_test(
[x], y, "input_0.reduce_l1(2, false)", name)
def reduce_l1_export_keepdims():
shape = [3, 2, 2]
axes = np.array([2], dtype=np.int64)
keepdims = True
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=True).astype(np.int64)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "reduce_l1_fp8x23_export_keepdims"
make_test(
[x], y, "input_0.reduce_l1(2, true)", name)
def reduce_l1_axis_0():
shape = [3, 3, 3]
axes = np.array([0], dtype=np.int64)
keepdims = True
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=True).astype(np.int64)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "reduce_l1_fp8x23_export_negative_axes_keepdims"
make_test(
[x], y, "input_0.reduce_l1(0, true)", name)
reduce_l1_export_do_not_keepdims()
reduce_l1_export_keepdims()
reduce_l1_axis_0()
@staticmethod
def reduce_l1_fp16x16():
def reduce_l1_export_do_not_keepdims():
shape = [3, 2, 2]
axes = np.array([2], dtype=np.int64)
keepdims = False
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=False).astype(np.int64)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "reduce_l1_fp16x16_export_do_not_keepdims"
make_test(
[x], y, "input_0.reduce_l1(2, false)", name)
def reduce_l1_export_keepdims():
shape = [3, 2, 2]
axes = np.array([2], dtype=np.int64)
keepdims = True
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=True).astype(np.int64)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "reduce_l1_fp16x16_export_keepdims"
make_test(
[x], y, "input_0.reduce_l1(2, true)", name)
def reduce_l1_axis_0():
shape = [3, 3, 3]
axes = np.array([0], dtype=np.int64)
keepdims = True
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=True).astype(np.int64)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "reduce_l1_fp16x16_export_negative_axes_keepdims"
make_test(
[x], y, "input_0.reduce_l1(0, true)", name)
reduce_l1_export_do_not_keepdims()
reduce_l1_export_keepdims()
reduce_l1_axis_0()
@staticmethod
def reduce_l1_i8():
def reduce_l1_export_do_not_keepdims():
shape = [3, 2, 2]
axes = np.array([2], dtype=np.int8)
keepdims = False
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int8)
y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=False).astype(np.int8)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "reduce_l1_i8_export_do_not_keepdims"
make_test(
[x], y, "input_0.reduce_l1(2, false)", name)
def reduce_l1_export_keepdims():
shape = [3, 2, 2]
axes = np.array([2], dtype=np.int8)
keepdims = True
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int8)
y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=True).astype(np.int8)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "reduce_l1_i8_export_keepdims"
make_test(
[x], y, "input_0.reduce_l1(2, true)", name)
def reduce_l1_axis_0():
shape = [3, 3, 3]
axes = np.array([0], dtype=np.int8)
keepdims = True
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int8)
y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=True).astype(np.int8)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "reduce_l1_i8_export_negative_axes_keepdims"
make_test(
[x], y, "input_0.reduce_l1(0, true)", name)
reduce_l1_export_do_not_keepdims()
reduce_l1_export_keepdims()
reduce_l1_axis_0()
@staticmethod
def reduce_l1_i32():
def reduce_l1_export_do_not_keepdims():
shape = [3, 2, 2]
axes = np.array([2], dtype=np.int32)
keepdims = False
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int32)
y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=False).astype(np.int32)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "reduce_l1_i32_export_do_not_keepdims"
make_test(
[x], y, "input_0.reduce_l1(2, false)", name)
def reduce_l1_export_keepdims():
shape = [3, 2, 2]
axes = np.array([2], dtype=np.int32)
keepdims = True
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int32)
y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=True).astype(np.int32)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "reduce_l1_i32_export_keepdims"
make_test(
[x], y, "input_0.reduce_l1(2, true)", name)
def reduce_l1_axis_0():
shape = [3, 3, 3]
axes = np.array([0], dtype=np.int32)
keepdims = True
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int32)
y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=True).astype(np.int32)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "reduce_l1_i32_export_negative_axes_keepdims"
make_test(
[x], y, "input_0.reduce_l1(0, true)", name)
reduce_l1_export_do_not_keepdims()
reduce_l1_export_keepdims()
reduce_l1_axis_0()
@staticmethod
def reduce_l1_u32():
def reduce_l1_export_do_not_keepdims():
shape = [3, 2, 2]
axes = np.array([2], dtype=np.uint32)
keepdims = False
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.uint32)
y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=False).astype(np.uint32)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "reduce_l1_u32_export_do_not_keepdims"
make_test(
[x], y, "input_0.reduce_l1(2, false)", name)
def reduce_l1_export_keepdims():
shape = [3, 2, 2]
axes = np.array([2], dtype=np.uint32)
keepdims = True
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.uint32)
y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=True).astype(np.uint32)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "reduce_l1_u32_export_keepdims"
make_test(
[x], y, "input_0.reduce_l1(2, true)", name)
def reduce_l1_axis_0():
shape = [3, 3, 3]
axes = np.array([0], dtype=np.uint32)
keepdims = True
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.uint32)
y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=True).astype(np.uint32)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "reduce_l1_u32_export_negative_axes_keepdims"
make_test(
[x], y, "input_0.reduce_l1(0, true)", name)
reduce_l1_export_do_not_keepdims()
reduce_l1_export_keepdims()
reduce_l1_axis_0() |
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_node, make_test, to_fp, Tensor, Dtype, FixedImpl
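# Test generators for reduce_l2: the expected output is the square root of the
# sum of squares along the selected axis; the complex64 case reduces over the
# element magnitudes (abs(x)).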
class Reduce_l2(RunAll):
@staticmethod
def reduce_l2_fp8x23():
def reduce_l2_export_do_not_keepdims():
shape = [3, 2, 2]
axes = np.array([2], dtype=np.int64)
keepdims = False
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
y = np.sqrt(np.sum(a=np.square(x), axis=tuple(axes), keepdims=False)).astype(np.int64)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "reduce_l2_fp8x23_export_do_not_keepdims"
make_node([x], [y], name)
make_test(
[x], y, "input_0.reduce_l2(2, false)", name)
def reduce_l2_export_keepdims():
shape = [3, 2, 2]
axes = np.array([2], dtype=np.int64)
keepdims = True
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
y = np.sqrt(np.sum(a=np.square(x), axis=tuple(axes), keepdims=True)).astype(np.int64)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "reduce_l2_fp8x23_export_keepdims"
make_node([x], [y], name)
make_test(
[x], y, "input_0.reduce_l2(2, true)", name)
def reduce_l2_axis_0():
shape = [3, 3, 3]
axes = np.array([0], dtype=np.int64)
keepdims = True
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
y = np.sqrt(np.sum(a=np.square(x), axis=tuple(axes), keepdims=True)).astype(np.int64)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "reduce_l2_fp8x23_export_negative_axes_keepdims"
make_node([x], [y], name)
make_test(
[x], y, "input_0.reduce_l2(0, true)", nam |
e)
reduce_l2_export_do_not_keepdims()
reduce_l2_export_keepdims()
reduce_l2_axis_0()
@staticmethod
def reduce_l2_fp16x16():
def reduce_l2_export_do_not_keepdims():
shape = [3, 2, 2]
axes = np.array([2], dtype=np.int64)
keepdims = False
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
y = np.sqrt(np.sum(a=np.square(x), axis=tuple(axes), keepdims=False)).astype(np.int64)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "reduce_l2_fp16x16_export_do_not_keepdims"
make_node([x], [y], name)
make_test(
[x], y, "input_0.reduce_l2(2, false)", name)
def reduce_l2_export_keepdims():
shape = [3, 2, 2]
axes = np.array([2], dtype=np.int64)
keepdims = True
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
y = np.sqrt(np.sum(a=np.square(x), axis=tuple(axes), keepdims=True)).astype(np.int64)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "reduce_l2_fp16x16_export_keepdims"
make_node([x], [y], name)
make_test(
[x], y, "input_0.reduce_l2(2, true)", name)
def reduce_l2_axis_0():
shape = [3, 3, 3]
axes = np.array([0], dtype=np.int64)
keepdims = True
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
y = np.sqrt(np.sum(a=np.square(x), axis=tuple(axes), keepdims=True)).astype(np.int64)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "reduce_l2_fp16x16_export_negative_axes_keepdims"
make_node([x], [y], name)
make_test(
[x], y, "input_0.reduce_l2(0, true)", name)
reduce_l2_export_do_not_keepdims()
reduce_l2_export_keepdims()
reduce_l2_axis_0()
@staticmethod
def reduce_l2_complex64():
def reduce_l2_axis_0():
shape = [2, 3]
axes = np.array([0], dtype=np.int64)
keepdims = True
x = np.reshape(np.array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j, 4.-1.j]), shape)
y = np.sqrt(np.sum(a=np.square(abs(x)), axis=tuple(axes), keepdims=True))
print(to_fp(x.flatten(), FixedImpl.FP64x64))
x = Tensor(Dtype.COMPLEX64, x.shape, to_fp(
x.flatten(), FixedImpl.FP64x64))
y = Tensor(Dtype.COMPLEX64, y.shape, to_fp(
y.flatten(), FixedImpl.FP64x64))
name = "reduce_l2_complex64_axis_0"
make_test(
[x], y, "input_0.reduce_l2(0, true)", name)
reduce_l2_axis_0() |
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
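# Test generators for reduce_log_sum: the expected output is
# log(sum(x, axis=...)), with and without keepdims.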
class Reduce_log_sum(RunAll):
@staticmethod
def reduce_log_sum_fp8x23():
def reduce_log_sum_export_do_not_keepdims():
shape = [3, 2, 2]
axes = np.array([2], dtype=np.int64)
keepdims = False
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
y = np.log(np.sum(x, axis=tuple(axes), keepdims=False))
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "reduce_log_sum_fp8x23_export_do_not_keepdims"
make_test(
[x], y, "input_0.reduce_log_sum(2, false)", name)
def reduce_log_sum_export_keepdims():
shape = [3, 2, 2]
axes = np.array([2], dtype=np.int64)
keepdims = True
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
y = np.log(np.sum(x, axis=tuple(axes), keepdims=True))
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "reduce_log_sum_fp8x23_export_keepdims"
make_test(
[x], y, "input_0.reduce_log_sum(2, true)", name)
def reduce_log_sum_axis_0():
shape = [3, 3, 3]
axes = np.array([0], dtype=np.int64)
keepdims = True
x = np.reshape(np.arange(1, np.prod(shape) + 1), shape)
y = np.log(np.sum(x, axis=tuple(axes), keepdims=True))
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "reduce_log_sum_fp8x23_export_negative_axes_keepdims"
make_test(
[x], y, "input_0.reduc |
e_log_sum(0, true)", name)
reduce_log_sum_export_do_not_keepdims()
reduce_log_sum_export_keepdims()
reduce_log_sum_axis_0()
@staticmethod
def reduce_log_sum_fp16x16():
def reduce_log_sum_export_do_not_keepdims():
shape = [3, 2, 2]
axes = np.array([2], dtype=np.int64)
keepdims = False
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
y = np.log(np.sum(x, axis=tuple(axes), keepdims=False))
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "reduce_log_sum_fp16x16_export_do_not_keepdims"
make_test(
[x], y, "input_0.reduce_log_sum(2, false)", name)
def reduce_log_sum_export_keepdims():
shape = [3, 2, 2]
axes = np.array([2], dtype=np.int64)
keepdims = True
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
y = np.log(np.sum(x, axis=tuple(axes), keepdims=True))
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "reduce_log_sum_fp16x16_export_keepdims"
make_test(
[x], y, "input_0.reduce_log_sum(2, true)", name)
def reduce_log_sum_axis_0():
shape = [2, 2, 2]
axes = np.array([0], dtype=np.int64)
keepdims = True
x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
y = np.log(np.sum(x, axis=tuple(axes), keepdims=True))
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "reduce_log_sum_fp16x16_export_negative_axes_keepdims"
make_test(
[x], y, "input_0.reduce_log_sum(0, true)", name)
reduce_log_sum_export_do_not_keepdims()
reduce_log_sum_export_keepdims()
reduce_log_sum_axis_0() |
import numpy as np
from nodegen.node |